author    Jakub Kicinski <kuba@kernel.org>    2022-10-20 17:49:10 -0700
committer Jakub Kicinski <kuba@kernel.org>    2022-10-20 17:49:10 -0700
commit    94adb5e29e0e583283183dbaa852ee9d7d0c4c26 (patch)
tree      3038a63f3ed9632711100f1187c4c53c304b785b
parent    36875a063b5e3618b42f7bace850473bb88a7c24 (diff)
parent    6d36c728bc2e2d632f4b0dea00df5532e20dfdab (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst23
-rw-r--r--Documentation/admin-guide/device-mapper/verity.rst4
-rw-r--r--Documentation/arm64/silicon-errata.rst2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml5
-rw-r--r--Documentation/devicetree/bindings/leds/common.yaml39
-rw-r--r--Documentation/devicetree/bindings/leds/mediatek,mt6370-indicator.yaml5
-rw-r--r--Documentation/devicetree/bindings/mfd/mediatek,mt6370.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml1
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml5
-rw-r--r--Documentation/devicetree/bindings/riscv/microchip.yaml24
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml (renamed from Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml)28
-rw-r--r--Documentation/devicetree/bindings/timer/sifive,clint.yaml18
-rw-r--r--Documentation/filesystems/ubifs.rst2
-rw-r--r--Documentation/mm/page_owner.rst20
-rw-r--r--Documentation/networking/filter.rst2
-rw-r--r--Documentation/process/howto.rst2
-rw-r--r--Documentation/riscv/index.rst1
-rw-r--r--Documentation/riscv/uabi.rst6
-rw-r--r--Documentation/tools/rtla/rtla-timerlat-top.rst2
-rw-r--r--Documentation/trace/ftrace.rst2
-rw-r--r--Documentation/translations/it_IT/process/howto.rst2
-rw-r--r--Documentation/translations/ja_JP/howto.rst2
-rw-r--r--Documentation/translations/ko_KR/howto.rst2
-rw-r--r--Documentation/translations/zh_CN/arch.rst29
-rw-r--r--Documentation/translations/zh_CN/devicetree/changesets.rst2
-rw-r--r--Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst2
-rw-r--r--Documentation/translations/zh_CN/devicetree/kernel-api.rst2
-rw-r--r--Documentation/translations/zh_CN/devicetree/overlay-notes.rst2
-rw-r--r--Documentation/translations/zh_CN/index.rst169
-rw-r--r--Documentation/translations/zh_CN/mm/ksm.rst2
-rw-r--r--Documentation/translations/zh_CN/mm/page_owner.rst10
-rw-r--r--Documentation/translations/zh_CN/process/howto.rst2
-rw-r--r--Documentation/translations/zh_CN/process/index.rst1
-rw-r--r--Documentation/translations/zh_TW/process/howto.rst2
-rw-r--r--MAINTAINERS38
-rw-r--r--Makefile6
-rw-r--r--arch/alpha/kernel/core_marvel.c2
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/signal.c2
-rw-r--r--arch/arm/mach-mmp/devices.c8
-rw-r--r--arch/arm/mach-spear/generic.h3
-rw-r--r--arch/arm/mach-spear/spear3xx.c1
-rw-r--r--arch/arm/mach-spear/spear6xx.c9
-rw-r--r--arch/arm64/Kconfig17
-rw-r--r--arch/arm64/include/asm/cputype.h4
-rw-r--r--arch/arm64/kernel/cpu_errata.c5
-rw-r--r--arch/arm64/kernel/mte.c9
-rw-r--r--arch/arm64/kernel/process.c2
-rw-r--r--arch/arm64/kernel/proton-pack.c6
-rw-r--r--arch/arm64/kernel/syscall.c2
-rw-r--r--arch/arm64/mm/mteswap.c7
-rw-r--r--arch/arm64/tools/sysreg2
-rw-r--r--arch/loongarch/include/asm/pgtable.h3
-rw-r--r--arch/loongarch/kernel/process.c2
-rw-r--r--arch/loongarch/kernel/vdso.c2
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/vdso.c2
-rw-r--r--arch/openrisc/kernel/dma.c16
-rw-r--r--arch/parisc/include/asm/alternative.h21
-rw-r--r--arch/parisc/include/asm/pdc.h3
-rw-r--r--arch/parisc/include/asm/pgtable.h7
-rw-r--r--arch/parisc/kernel/alternative.c7
-rw-r--r--arch/parisc/kernel/entry.S8
-rw-r--r--arch/parisc/kernel/pdc_cons.c240
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/setup.c6
-rw-r--r--arch/parisc/kernel/sys_parisc.c4
-rw-r--r--arch/parisc/kernel/traps.c15
-rw-r--r--arch/parisc/kernel/vdso.c2
-rw-r--r--arch/powerpc/crypto/crc-vpmsum_test.c2
-rw-r--r--arch/powerpc/include/asm/syscalls.h16
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/interrupt_64.S15
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c38
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl16
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c21
-rw-r--r--arch/powerpc/platforms/pseries/Makefile3
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c151
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/Makefile2
-rw-r--r--arch/riscv/boot/dts/microchip/Makefile2
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi43
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts18
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi45
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts179
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi29
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi45
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts145
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs.dtsi31
-rw-r--r--arch/riscv/errata/thead/errata.c14
-rw-r--r--arch/riscv/include/asm/cacheflush.h2
-rw-r--r--arch/riscv/include/asm/elf.h4
-rw-r--r--arch/riscv/include/asm/io.h16
-rw-r--r--arch/riscv/include/asm/mmu.h1
-rw-r--r--arch/riscv/include/uapi/asm/auxvec.h4
-rw-r--r--arch/riscv/kernel/cpu.c51
-rw-r--r--arch/riscv/kernel/cpufeature.c39
-rw-r--r--arch/riscv/kernel/setup.c4
-rw-r--r--arch/riscv/kernel/sys_riscv.c3
-rw-r--r--arch/riscv/kernel/traps.c9
-rw-r--r--arch/riscv/kernel/vdso.c13
-rw-r--r--arch/riscv/mm/fault.c3
-rw-r--r--arch/s390/kernel/process.c4
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/mm/mmap.c2
-rw-r--r--arch/sparc/vdso/vma.c2
-rw-r--r--arch/um/drivers/chan.h1
-rw-r--r--arch/um/drivers/mconsole_kern.c9
-rw-r--r--arch/um/drivers/mmapper_kern.c2
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/um/drivers/ssl.c2
-rw-r--r--arch/um/drivers/stdio_console.c2
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/drivers/vector_kern.c2
-rw-r--r--arch/um/drivers/virt-pci.c4
-rw-r--r--arch/um/drivers/virtio_uml.c71
-rw-r--r--arch/um/kernel/physmem.c2
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/kernel/um_arch.c14
-rw-r--r--arch/um/kernel/umid.c2
-rw-r--r--arch/x86/entry/vdso/vma.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/module.c2
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/mm/pat/cpa-test.c4
-rw-r--r--block/bio.c2
-rw-r--r--block/blk-crypto-fallback.c2
-rw-r--r--block/blk-wbt.c3
-rw-r--r--block/genhd.c7
-rw-r--r--crypto/async_tx/raid6test.c2
-rw-r--r--crypto/testmgr.c94
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_brcm.c2
-rw-r--r--drivers/ata/ahci_imx.c4
-rw-r--r--drivers/ata/ahci_qoriq.c2
-rw-r--r--drivers/ata/ahci_st.c2
-rw-r--r--drivers/ata/ahci_xgene.c2
-rw-r--r--drivers/ata/sata_rcar.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c4
-rw-r--r--drivers/block/zram/zram_drv.c26
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c2
-rw-r--r--drivers/char/random.c11
-rw-r--r--drivers/clk/at91/clk-generated.c5
-rw-r--r--drivers/clk/at91/clk-master.c9
-rw-r--r--drivers/clk/at91/clk-peripheral.c4
-rw-r--r--drivers/clk/clk-composite.c6
-rw-r--r--drivers/clk/clk-divider.c20
-rw-r--r--drivers/clk/clk.c286
-rw-r--r--drivers/clk/clk_test.c1569
-rw-r--r--drivers/clk/mediatek/clk-mux.c10
-rw-r--r--drivers/clk/qcom/clk-rcg2.c9
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c11
-rw-r--r--drivers/clk/spear/spear3xx_clock.c1
-rw-r--r--drivers/clk/spear/spear6xx_clock.c1
-rw-r--r--drivers/clk/tegra/clk-tegra114.c1
-rw-r--r--drivers/clk/tegra/clk-tegra124.c1
-rw-r--r--drivers/clk/tegra/clk-tegra20.c1
-rw-r--r--drivers/clk/tegra/clk-tegra210.c1
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1
-rw-r--r--drivers/dax/hmem/device.c4
-rw-r--r--drivers/dax/super.c6
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/edac/Kconfig2
-rw-r--r--drivers/edac/sifive_edac.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c165
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_7.c63
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c73
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c147
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c96
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h140
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c1
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h3
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c62
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c78
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c37
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c108
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c2
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c2
-rw-r--r--drivers/i3c/master.c7
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c4
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c3
-rw-r--r--drivers/leds/leds-pca963x.c22
-rw-r--r--drivers/md/bcache/request.c2
-rw-r--r--drivers/md/dm-bufio.c13
-rw-r--r--drivers/md/dm-cache-policy.h2
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-ioctl.c78
-rw-r--r--drivers/md/dm-raid.c4
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-stats.c2
-rw-r--r--drivers/md/dm-table.c78
-rw-r--r--drivers/md/dm-verity-target.c18
-rw-r--r--drivers/md/dm.c9
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-touch-cap.c6
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2.c2
-rw-r--r--drivers/mmc/core/block.c6
-rw-r--r--drivers/mmc/core/card.h6
-rw-r--r--drivers/mmc/core/core.c4
-rw-r--r--drivers/mmc/core/quirks.h6
-rw-r--r--drivers/mmc/host/dw_mmc.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c21
-rw-r--r--drivers/mmc/host/sdhci-sprd.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c2
-rw-r--r--drivers/mtd/nand/raw/nandsim.c8
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c12
-rw-r--r--drivers/mtd/tests/speedtest.c2
-rw-r--r--drivers/mtd/tests/stresstest.c19
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/mtd/ubi/build.c14
-rw-r--r--drivers/mtd/ubi/cdev.c4
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/mtd/ubi/debug.h6
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c10
-rw-r--r--drivers/mtd/ubi/io.c2
-rw-r--r--drivers/mtd/ubi/ubi-media.h2
-rw-r--r--drivers/mtd/ubi/ubi.h9
-rw-r--r--drivers/mtd/ubi/vmt.c4
-rw-r--r--drivers/mtd/ubi/wl.c8
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c83
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c11
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c4
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c17
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c15
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c12
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c8
-rw-r--r--drivers/net/ethernet/sfc/ef10.c58
-rw-r--r--drivers/net/ethernet/sfc/filter.h4
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/hamradio/baycom_epp.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/rndis_filter.c6
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/at803x.c2
-rw-r--r--drivers/net/phy/dp83822.c3
-rw-r--r--drivers/net/phy/dp83867.c8
-rw-r--r--drivers/net/phy/phylink.c3
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c2
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c24
-rw-r--r--drivers/nvdimm/region_devs.c10
-rw-r--r--drivers/nvdimm/security.c2
-rw-r--r--drivers/nvme/common/auth.c2
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/host/pci.c4
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/host/tcp.c2
-rw-r--r--drivers/parisc/eisa_enumerator.c8
-rw-r--r--drivers/pci/setup-bus.c62
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c4
-rw-r--r--drivers/perf/riscv_pmu_sbi.c7
-rw-r--r--drivers/rtc/Kconfig1
-rw-r--r--drivers/rtc/rtc-cmos.c29
-rw-r--r--drivers/rtc/rtc-ds1685.c2
-rw-r--r--drivers/rtc/rtc-gamecube.c11
-rw-r--r--drivers/rtc/rtc-isl12022.c161
-rw-r--r--drivers/rtc/rtc-jz4740.c25
-rw-r--r--drivers/rtc/rtc-mpfs.c26
-rw-r--r--drivers/rtc/rtc-mxc.c27
-rw-r--r--drivers/rtc/rtc-rv3028.c5
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-ti-k3.c56
-rw-r--r--drivers/s390/char/vmur.c37
-rw-r--r--drivers/s390/char/vmur.h2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c4
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c6
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/soc/sifive/Kconfig6
-rw-r--r--drivers/soc/sifive/Makefile2
-rw-r--r--drivers/soc/sifive/sifive_ccache.c255
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c237
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c2
-rw-r--r--drivers/thunderbolt/xdomain.c2
-rw-r--r--drivers/tty/serial/Kconfig15
-rw-r--r--drivers/video/fbdev/stifb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--fs/btrfs/backref.c84
-rw-r--r--fs/btrfs/backref.h1
-rw-r--r--fs/btrfs/block-group.c2
-rw-r--r--fs/btrfs/extent-io-tree.c15
-rw-r--r--fs/btrfs/send.c5
-rw-r--r--fs/btrfs/send.h5
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mdsmap.c2
-rw-r--r--fs/cifs/cached_dir.c469
-rw-r--r--fs/cifs/cached_dir.h20
-rw-r--r--fs/cifs/cifs_ioctl.h8
-rw-r--r--fs/cifs/cifsfs.c9
-rw-r--r--fs/cifs/cifsglob.h48
-rw-r--r--fs/cifs/cifsproto.h13
-rw-r--r--fs/cifs/cifssmb.c2
-rw-r--r--fs/cifs/connect.c22
-rw-r--r--fs/cifs/dir.c32
-rw-r--r--fs/cifs/file.c45
-rw-r--r--fs/cifs/inode.c176
-rw-r--r--fs/cifs/ioctl.c25
-rw-r--r--fs/cifs/link.c107
-rw-r--r--fs/cifs/readdir.c31
-rw-r--r--fs/cifs/sess.c34
-rw-r--r--fs/cifs/smb1ops.c56
-rw-r--r--fs/cifs/smb2file.c127
-rw-r--r--fs/cifs/smb2inode.c170
-rw-r--r--fs/cifs/smb2misc.c2
-rw-r--r--fs/cifs/smb2ops.c149
-rw-r--r--fs/cifs/smb2pdu.c75
-rw-r--r--fs/cifs/smb2pdu.h3
-rw-r--r--fs/cifs/smb2proto.h25
-rw-r--r--fs/erofs/fscache.c3
-rw-r--r--fs/erofs/zdata.c17
-rw-r--r--fs/erofs/zdata.h6
-rw-r--r--fs/erofs/zmap.c22
-rw-r--r--fs/exfat/inode.c2
-rw-r--r--fs/ext2/ialloc.c3
-rw-r--r--fs/ext4/ialloc.c7
-rw-r--r--fs/ext4/ioctl.c4
-rw-r--r--fs/ext4/mmp.c2
-rw-r--r--fs/ext4/super.c7
-rw-r--r--fs/ext4/verity.c3
-rw-r--r--fs/f2fs/gc.c2
-rw-r--r--fs/f2fs/namei.c2
-rw-r--r--fs/f2fs/segment.c8
-rw-r--r--fs/f2fs/verity.c3
-rw-r--r--fs/fat/inode.c2
-rw-r--r--fs/hostfs/hostfs_kern.c2
-rw-r--r--fs/nfsd/nfs4state.c4
-rw-r--r--fs/ntfs3/fslog.c6
-rw-r--r--fs/ubifs/crypto.c11
-rw-r--r--fs/ubifs/debug.c10
-rw-r--r--fs/ubifs/dir.c27
-rw-r--r--fs/ubifs/journal.c30
-rw-r--r--fs/ubifs/lpt_commit.c14
-rw-r--r--fs/ubifs/tnc_commit.c2
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c2
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c4
-rw-r--r--fs/xfs/xfs_error.c2
-rw-r--r--fs/xfs/xfs_icache.c2
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--include/linux/blkdev.h6
-rw-r--r--include/linux/cgroup-defs.h3
-rw-r--r--include/linux/cgroup.h6
-rw-r--r--include/linux/clk-provider.h18
-rw-r--r--include/linux/clk.h2
-rw-r--r--include/linux/clk/at91_pmc.h6
-rw-r--r--include/linux/clk/spear.h14
-rw-r--r--include/linux/cpumask.h19
-rw-r--r--include/linux/damon.h6
-rw-r--r--include/linux/dsa/tag_qca.h8
-rw-r--r--include/linux/io_uring_types.h5
-rw-r--r--include/linux/memremap.h1
-rw-r--r--include/linux/migrate.h15
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/netdevice.h10
-rw-r--r--include/linux/nodemask.h2
-rw-r--r--include/linux/phylink.h2
-rw-r--r--include/linux/prandom.h12
-rw-r--r--include/linux/psi.h12
-rw-r--r--include/linux/psi_types.h31
-rw-r--r--include/linux/random.h5
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/slab_def.h1
-rw-r--r--include/net/genetlink.h8
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/red.h2
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/sock_reuseport.h11
-rw-r--r--include/soc/sifive/sifive_ccache.h16
-rw-r--r--include/soc/sifive/sifive_l2_cache.h16
-rw-r--r--include/sound/hdaudio.h8
-rw-r--r--include/uapi/mtd/ubi-user.h8
-rw-r--r--io_uring/fdinfo.c2
-rw-r--r--io_uring/io_uring.c33
-rw-r--r--io_uring/io_uring.h18
-rw-r--r--io_uring/net.c28
-rw-r--r--io_uring/opdef.c1
-rw-r--r--io_uring/rsrc.c1
-rw-r--r--io_uring/rw.c46
-rw-r--r--io_uring/tctx.c42
-rw-r--r--io_uring/tctx.h6
-rw-r--r--kernel/bpf/bloom_filter.c2
-rw-r--r--kernel/bpf/cgroup_iter.c2
-rw-r--r--kernel/bpf/core.c6
-rw-r--r--kernel/bpf/hashtab.c2
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cgroup/cgroup.c203
-rw-r--r--kernel/kcsan/selftest.c4
-rw-r--r--kernel/locking/test-ww_mutex.c4
-rw-r--r--kernel/sched/core.c1
-rw-r--r--kernel/sched/psi.c280
-rw-r--r--kernel/sched/stats.h6
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--lib/Kconfig.debug10
-rw-r--r--lib/Kconfig.kgdb2
-rw-r--r--lib/cmdline_kunit.c4
-rw-r--r--lib/fault-inject.c2
-rw-r--r--lib/find_bit_benchmark.c4
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/random32.c4
-rw-r--r--lib/reed_solomon/test_rslib.c12
-rw-r--r--lib/sbitmap.c4
-rw-r--r--lib/test-string_helpers.c2
-rw-r--r--lib/test_fprobe.c2
-rw-r--r--lib/test_hexdump.c10
-rw-r--r--lib/test_hmm.c129
-rw-r--r--lib/test_hmm_uapi.h1
-rw-r--r--lib/test_kprobes.c2
-rw-r--r--lib/test_list_sort.c2
-rw-r--r--lib/test_meminit.c21
-rw-r--r--lib/test_min_heap.c6
-rw-r--r--lib/test_objagg.c2
-rw-r--r--lib/test_rhashtable.c6
-rw-r--r--lib/test_vmalloc.c19
-rw-r--r--lib/uuid.c2
-rw-r--r--mm/compaction.c1
-rw-r--r--mm/damon/core.c26
-rw-r--r--mm/damon/vaddr.c4
-rw-r--r--mm/highmem.c43
-rw-r--r--mm/hugetlb.c72
-rw-r--r--mm/kasan/kasan_test.c15
-rw-r--r--mm/memory.c20
-rw-r--r--mm/memremap.c30
-rw-r--r--mm/migrate.c34
-rw-r--r--mm/migrate_device.c239
-rw-r--r--mm/mmap.c28
-rw-r--r--mm/mmu_gather.c10
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/page_alloc.c12
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/slab.c39
-rw-r--r--mm/slub.c2
-rw-r--r--net/802/garp.c2
-rw-r--r--net/802/mrp.c2
-rw-r--r--net/atm/mpoa_proc.c3
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/neighbour.c2
-rw-r--r--net/core/pktgen.c47
-rw-r--r--net/core/skmsg.c8
-rw-r--r--net/core/sock_reuseport.c16
-rw-r--r--net/core/stream.c2
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/hsr/hsr_forward.c12
-rw-r--r--net/ipv4/datagram.c4
-rw-r--r--net/ipv4/igmp.c6
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c1
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c1
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp_cdg.c2
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/datagram.c2
-rw-r--r--net/ipv6/ip6_flowlabel.c2
-rw-r--r--net/ipv6/mcast.c10
-rw-r--r--net/ipv6/netfilter/ip6t_rpfilter.c1
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c2
-rw-r--r--net/ipv6/output_core.c2
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c2
-rw-r--r--net/mac80211/scan.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_twos.c4
-rw-r--r--net/netfilter/nf_nat_core.c4
-rw-r--r--net/netfilter/nf_tables_api.c5
-rw-r--r--net/netfilter/xt_statistic.c2
-rw-r--r--net/openvswitch/actions.c2
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/rds/bind.c2
-rw-r--r--net/sched/act_gact.c2
-rw-r--r--net/sched/act_sample.c2
-rw-r--r--net/sched/sch_api.c5
-rw-r--r--net/sched/sch_cake.c12
-rw-r--r--net/sched/sch_fq_codel.c25
-rw-r--r--net/sched/sch_netem.c22
-rw-r--r--net/sched/sch_pie.c2
-rw-r--r--net/sched/sch_sfb.c5
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/smc/smc_core.c3
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c4
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/tipc/discover.c2
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/tipc/topsrv.c2
-rw-r--r--net/tls/tls_strp.c32
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/unix/garbage.c20
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.modpost2
-rwxr-xr-xscripts/clang-tools/run-clang-tools.py11
-rwxr-xr-xscripts/package/mkspec2
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/sound_oss.c13
-rw-r--r--sound/pci/hda/cs35l41_hda.c197
-rw-r--r--sound/pci/hda/hda_component.h2
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.c79
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.h2
-rw-r--r--sound/pci/hda/patch_realtek.c30
-rw-r--r--sound/usb/card.h3
-rw-r--r--sound/usb/endpoint.c32
-rw-r--r--tools/arch/x86/include/asm/msr-index.h18
-rw-r--r--tools/lib/perf/include/perf/event.h5
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c116
-rw-r--r--tools/perf/arch/arm/util/pmu.c3
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c2
-rw-r--r--tools/perf/arch/arm64/util/Build2
-rw-r--r--tools/perf/arch/arm64/util/hisi-ptt.c188
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c2
-rw-r--r--tools/perf/builtin-list.c2
-rw-r--r--tools/perf/builtin-mem.c8
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/system-wide-dummy2
-rw-r--r--tools/perf/tests/attr/test-record-group4
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling6
-rw-r--r--tools/perf/tests/attr/test-record-group14
-rw-r--r--tools/perf/tests/attr/test-record-group24
-rwxr-xr-xtools/perf/tests/shell/stat+csv_output.sh43
-rwxr-xr-xtools/perf/tests/shell/stat+json_output.sh43
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh2
-rwxr-xr-xtools/perf/tests/shell/test_intel_pt.sh385
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/auxtrace.c4
-rw-r--r--tools/perf/util/auxtrace.h1
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.bpf.c29
-rw-r--r--tools/perf/util/genelf.h4
-rw-r--r--tools/perf/util/hisi-ptt-decoder/Build1
-rw-r--r--tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.c164
-rw-r--r--tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.h31
-rw-r--r--tools/perf/util/hisi-ptt.c192
-rw-r--r--tools/perf/util/hisi-ptt.h19
-rw-r--r--tools/perf/util/intel-pt.c9
-rw-r--r--tools/perf/util/parse-events.c3
-rw-r--r--tools/perf/util/pmu.c17
-rw-r--r--tools/perf/util/pmu.h2
-rw-r--r--tools/perf/util/pmu.l2
-rw-r--r--tools/perf/util/pmu.y15
-rw-r--r--tools/testing/selftests/net/Makefile1
-rw-r--r--tools/testing/selftests/net/test_ingress_egress_chaining.sh79
-rw-r--r--tools/testing/selftests/vm/hmm-tests.c49
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c22
696 files changed, 10273 insertions, 4807 deletions
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 7bcfb38498c6..dc254a3cb956 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -976,6 +976,29 @@ All cgroup core files are prefixed with "cgroup."
killing cgroups is a process directed operation, i.e. it affects
the whole thread-group.
+ cgroup.pressure
+ A read-write single value file that allowed values are "0" and "1".
+ The default is "1".
+
+ Writing "0" to the file will disable the cgroup PSI accounting.
+ Writing "1" to the file will re-enable the cgroup PSI accounting.
+
+ This control attribute is not hierarchical, so disable or enable PSI
+ accounting in a cgroup does not affect PSI accounting in descendants
+ and doesn't need pass enablement via ancestors from root.
+
+ The reason this control attribute exists is that PSI accounts stalls for
+ each cgroup separately and aggregates it at each level of the hierarchy.
+ This may cause non-negligible overhead for some workloads when under
+ deep level of the hierarchy, in which case this control attribute can
+ be used to disable PSI accounting in the non-leaf cgroups.
+
+ irq.pressure
+ A read-write nested-keyed file.
+
+ Shows pressure stall information for IRQ/SOFTIRQ. See
+ :ref:`Documentation/accounting/psi.rst <psi>` for details.
+
Controllers
===========
diff --git a/Documentation/admin-guide/device-mapper/verity.rst b/Documentation/admin-guide/device-mapper/verity.rst
index 1a6b91368e59..a65c1602cb23 100644
--- a/Documentation/admin-guide/device-mapper/verity.rst
+++ b/Documentation/admin-guide/device-mapper/verity.rst
@@ -141,6 +141,10 @@ root_hash_sig_key_desc <key_description>
also gain new certificates at run time if they are signed by a certificate
already in the secondary trusted keyring.
+try_verify_in_tasklet
+ If verity hashes are in cache, verify data blocks in kernel tasklet instead
+ of workqueue. This option can reduce IO latency.
+
Theory of operation
===================
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 17d9fc5d14fb..808ade4cc008 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -76,6 +76,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A55 | #1530923 | ARM64_ERRATUM_1530923 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A55 | #2441007 | ARM64_ERRATUM_2441007 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #852523 | N/A |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index 92e0f8c3eff2..99e01f4d0a69 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -66,6 +66,11 @@ properties:
- enum:
- allwinner,sun20i-d1-plic
- const: thead,c900-plic
+ - items:
+ - const: sifive,plic-1.0.0
+ - const: riscv,plic0
+ deprecated: true
+ description: For the QEMU virt machine only
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 328952d7acbb..3c14a98430e1 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -79,24 +79,27 @@ properties:
the LED.
$ref: /schemas/types.yaml#/definitions/string
- enum:
- # LED will act as a back-light, controlled by the framebuffer system
- - backlight
- # LED will turn on (but for leds-gpio see "default-state" property in
- # Documentation/devicetree/bindings/leds/leds-gpio.yaml)
- - default-on
- # LED "double" flashes at a load average based rate
- - heartbeat
- # LED indicates disk activity
- - disk-activity
- # LED indicates IDE disk activity (deprecated), in new implementations
- # use "disk-activity"
- - ide-disk
- # LED flashes at a fixed, configurable rate
- - timer
- # LED alters the brightness for the specified duration with one software
- # timer (requires "led-pattern" property)
- - pattern
+ oneOf:
+ - enum:
+ # LED will act as a back-light, controlled by the framebuffer system
+ - backlight
+ # LED will turn on (but for leds-gpio see "default-state" property in
+ # Documentation/devicetree/bindings/leds/leds-gpio.yaml)
+ - default-on
+ # LED "double" flashes at a load average based rate
+ - heartbeat
+ # LED indicates disk activity
+ - disk-activity
+ # LED indicates IDE disk activity (deprecated), in new implementations
+ # use "disk-activity"
+ - ide-disk
+ # LED flashes at a fixed, configurable rate
+ - timer
+ # LED alters the brightness for the specified duration with one software
+ # timer (requires "led-pattern" property)
+ - pattern
+ # LED is triggered by SD/MMC activity
+ - pattern: "^mmc[0-9]+$"
led-pattern:
description: |
diff --git a/Documentation/devicetree/bindings/leds/mediatek,mt6370-indicator.yaml b/Documentation/devicetree/bindings/leds/mediatek,mt6370-indicator.yaml
index 204b103ffc2c..16b3abc2af3a 100644
--- a/Documentation/devicetree/bindings/leds/mediatek,mt6370-indicator.yaml
+++ b/Documentation/devicetree/bindings/leds/mediatek,mt6370-indicator.yaml
@@ -13,9 +13,6 @@ description: |
This module is part of the MT6370 MFD device.
Add MT6370 LED driver include 4-channel RGB LED support Register/PWM/Breath Mode
-allOf:
- - $ref: leds-class-multicolor.yaml#
-
properties:
compatible:
const: mediatek,mt6370-indicator
@@ -29,6 +26,8 @@ properties:
patternProperties:
"^multi-led@[0-3]$":
type: object
+ $ref: leds-class-multicolor.yaml#
+ unevaluatedProperties: false
properties:
reg:
diff --git a/Documentation/devicetree/bindings/mfd/mediatek,mt6370.yaml b/Documentation/devicetree/bindings/mfd/mediatek,mt6370.yaml
index 250484d59ecd..5644882db2e8 100644
--- a/Documentation/devicetree/bindings/mfd/mediatek,mt6370.yaml
+++ b/Documentation/devicetree/bindings/mfd/mediatek,mt6370.yaml
@@ -139,8 +139,8 @@ examples:
charger {
compatible = "mediatek,mt6370-charger";
- interrupts = <48>, <68>, <6>;
- interrupt-names = "attach_i", "uvp_d_evt", "mivr";
+ interrupts = <68>, <48>, <6>;
+ interrupt-names = "uvp_d_evt", "attach_i", "mivr";
io-channels = <&mt6370_adc MT6370_CHAN_IBUS>;
mt6370_otg_vbus: usb-otg-vbus-regulator {
diff --git a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
index 64995cbb0f97..41c9760227cd 100644
--- a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
@@ -8,7 +8,6 @@ title: Samsung S3FWRN5 NCI NFC Controller
maintainers:
- Krzysztof Kozlowski <krzk@kernel.org>
- - Krzysztof Opasiak <k.opasiak@samsung.com>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index 873dd12f6e89..90a7cabf58fe 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -9,6 +9,7 @@ title: RISC-V bindings for 'cpus' DT nodes
maintainers:
- Paul Walmsley <paul.walmsley@sifive.com>
- Palmer Dabbelt <palmer@sifive.com>
+ - Conor Dooley <conor@kernel.org>
description: |
This document uses some terminology common to the RISC-V community
@@ -79,9 +80,7 @@ properties:
insensitive, letters in the riscv,isa string must be all
lowercase to simplify parsing.
$ref: "/schemas/types.yaml#/definitions/string"
- enum:
- - rv64imac
- - rv64imafdc
+ pattern: ^rv(?:64|32)imaf?d?q?c?b?v?k?h?(?:_[hsxz](?:[a-z])+)*$
# RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
timebase-frequency: false
diff --git a/Documentation/devicetree/bindings/riscv/microchip.yaml b/Documentation/devicetree/bindings/riscv/microchip.yaml
index 37f97ee4fe46..714d0fcab399 100644
--- a/Documentation/devicetree/bindings/riscv/microchip.yaml
+++ b/Documentation/devicetree/bindings/riscv/microchip.yaml
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip PolarFire SoC-based boards
maintainers:
- - Cyril Jean <Cyril.Jean@microchip.com>
- - Lewis Hanly <lewis.hanly@microchip.com>
+ - Conor Dooley <conor.dooley@microchip.com>
+ - Daire McNamara <daire.mcnamara@microchip.com>
description:
Microchip PolarFire SoC-based boards
@@ -17,12 +17,20 @@ properties:
$nodename:
const: '/'
compatible:
- items:
- - enum:
- - microchip,mpfs-icicle-kit
- - microchip,mpfs-icicle-reference-rtlv2203
- - sundance,polarberry
- - const: microchip,mpfs
+ oneOf:
+ - items:
+ - enum:
+ - microchip,mpfs-icicle-reference-rtlv2203
+ - microchip,mpfs-icicle-reference-rtlv2210
+ - const: microchip,mpfs-icicle-kit
+ - const: microchip,mpfs
+
+ - items:
+ - enum:
+ - aries,m100pfsevp
+ - microchip,mpfs-sev-kit
+ - sundance,polarberry
+ - const: microchip,mpfs
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml
index ca3b9be58058..bf3f07421f7e 100644
--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive,ccache0.yaml
@@ -2,18 +2,18 @@
# Copyright (C) 2020 SiFive, Inc.
%YAML 1.2
---
-$id: http://devicetree.org/schemas/riscv/sifive-l2-cache.yaml#
+$id: http://devicetree.org/schemas/riscv/sifive,ccache0.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: SiFive L2 Cache Controller
+title: SiFive Composable Cache Controller
maintainers:
- Sagar Kadam <sagar.kadam@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
description:
- The SiFive Level 2 Cache Controller is used to provide access to fast copies
- of memory for masters in a Core Complex. The Level 2 Cache Controller also
+ The SiFive Composable Cache Controller is used to provide access to fast copies
+ of memory for masters in a Core Complex. The Composable Cache Controller also
acts as directory-based coherency manager.
All the properties in ePAPR/DeviceTree specification applies for this platform.
@@ -22,6 +22,7 @@ select:
compatible:
contains:
enum:
+ - sifive,ccache0
- sifive,fu540-c000-ccache
- sifive,fu740-c000-ccache
@@ -33,6 +34,7 @@ properties:
oneOf:
- items:
- enum:
+ - sifive,ccache0
- sifive,fu540-c000-ccache
- sifive,fu740-c000-ccache
- const: cache
@@ -45,7 +47,7 @@ properties:
const: 64
cache-level:
- const: 2
+ enum: [2, 3]
cache-sets:
enum: [1024, 2048]
@@ -115,6 +117,22 @@ allOf:
cache-sets:
const: 1024
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: sifive,ccache0
+
+ then:
+ properties:
+ cache-level:
+ enum: [2, 3]
+
+ else:
+ properties:
+ cache-level:
+ const: 2
+
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
index e64f46339079..bbad24165837 100644
--- a/Documentation/devicetree/bindings/timer/sifive,clint.yaml
+++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
@@ -22,12 +22,18 @@ description:
properties:
compatible:
- items:
- - enum:
- - sifive,fu540-c000-clint
- - starfive,jh7100-clint
- - canaan,k210-clint
- - const: sifive,clint0
+ oneOf:
+ - items:
+ - enum:
+ - sifive,fu540-c000-clint
+ - starfive,jh7100-clint
+ - canaan,k210-clint
+ - const: sifive,clint0
+ - items:
+ - const: sifive,clint0
+ - const: riscv,clint0
+ deprecated: true
+ description: For the QEMU virt machine only
description:
Should be "<vendor>,<chip>-clint" and "sifive,clint<version>".
diff --git a/Documentation/filesystems/ubifs.rst b/Documentation/filesystems/ubifs.rst
index e6ee99762534..ced2f7679ddb 100644
--- a/Documentation/filesystems/ubifs.rst
+++ b/Documentation/filesystems/ubifs.rst
@@ -59,7 +59,7 @@ differences.
* JFFS2 is a write-through file-system, while UBIFS supports write-back,
which makes UBIFS much faster on writes.
-Similarly to JFFS2, UBIFS supports on-the-flight compression which makes
+Similarly to JFFS2, UBIFS supports on-the-fly compression which makes
it possible to fit quite a lot of data to the flash.
Similarly to JFFS2, UBIFS is tolerant of unclean reboots and power-cuts.
diff --git a/Documentation/mm/page_owner.rst b/Documentation/mm/page_owner.rst
index f18fd8907049..127514955a5e 100644
--- a/Documentation/mm/page_owner.rst
+++ b/Documentation/mm/page_owner.rst
@@ -38,22 +38,10 @@ not affect to allocation performance, especially if the static keys jump
label patching functionality is available. Following is the kernel's code
size change due to this facility.
-- Without page owner::
-
- text data bss dec hex filename
- 48392 2333 644 51369 c8a9 mm/page_alloc.o
-
-- With page owner::
-
- text data bss dec hex filename
- 48800 2445 644 51889 cab1 mm/page_alloc.o
- 6662 108 29 6799 1a8f mm/page_owner.o
- 1025 8 8 1041 411 mm/page_ext.o
-
-Although, roughly, 8 KB code is added in total, page_alloc.o increase by
-520 bytes and less than half of it is in hotpath. Building the kernel with
-page owner and turning it on if needed would be great option to debug
-kernel memory problem.
+Although enabling page owner increases kernel size by several kilobytes,
+most of this code is outside page allocator and its hot path. Building
+the kernel with page owner and turning it on if needed would be great
+option to debug kernel memory problem.
There is one notice that is caused by implementation detail. page owner
stores information into the memory from struct page extension. This memory
diff --git a/Documentation/networking/filter.rst b/Documentation/networking/filter.rst
index 43cdc4d34745..f69da5074860 100644
--- a/Documentation/networking/filter.rst
+++ b/Documentation/networking/filter.rst
@@ -305,7 +305,7 @@ Possible BPF extensions are shown in the following table:
vlan_tci skb_vlan_tag_get(skb)
vlan_avail skb_vlan_tag_present(skb)
vlan_tpid skb->vlan_proto
- rand prandom_u32()
+ rand get_random_u32()
=================================== =================================================
These extensions can also be prefixed with '#'.
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index cd6997a9d203..bd15c393ba3c 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -379,7 +379,7 @@ to subscribe and unsubscribe from the list can be found at:
There are archives of the mailing list on the web in many different
places. Use a search engine to find these archives. For example:
- http://dir.gmane.org/gmane.linux.kernel
+ https://lore.kernel.org/lkml/
It is highly recommended that you search the archives about the topic
you want to bring up, before you post it to the list. A lot of things
diff --git a/Documentation/riscv/index.rst b/Documentation/riscv/index.rst
index e23b876ad6eb..2e5b18fbb145 100644
--- a/Documentation/riscv/index.rst
+++ b/Documentation/riscv/index.rst
@@ -8,6 +8,7 @@ RISC-V architecture
boot-image-header
vm-layout
patch-acceptance
+ uabi
features
diff --git a/Documentation/riscv/uabi.rst b/Documentation/riscv/uabi.rst
new file mode 100644
index 000000000000..21a82cfb6c4d
--- /dev/null
+++ b/Documentation/riscv/uabi.rst
@@ -0,0 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+RISC-V Linux User ABI
+=====================
+
+Misaligned accesses are supported in userspace, but they may perform poorly.
diff --git a/Documentation/tools/rtla/rtla-timerlat-top.rst b/Documentation/tools/rtla/rtla-timerlat-top.rst
index 1c321de1c171..7c4e4b109493 100644
--- a/Documentation/tools/rtla/rtla-timerlat-top.rst
+++ b/Documentation/tools/rtla/rtla-timerlat-top.rst
@@ -39,7 +39,7 @@ higher than *30 us*. It is also set to stop the session if a *Thread* timer
latency higher than *30 us* is hit. Finally, it is set to save the trace
buffer if the stop condition is hit::
- [root@alien ~]# rtla timerlat top -s 30 -t 30 -T
+ [root@alien ~]# rtla timerlat top -s 30 -T 30 -t
Timer Latency
0 00:00:59 | IRQ Timer Latency (us) | Thread Timer Latency (us)
CPU COUNT | cur min avg max | cur min avg max
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index b37dc19e4d40..60bceb018d6a 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -564,7 +564,7 @@ of ftrace. Here is a list of some of the key files:
start::
- trace_fd = open("trace_marker", WR_ONLY);
+ trace_fd = open("trace_marker", O_WRONLY);
Note: Writing into the trace_marker file can also initiate triggers
that are written into /sys/kernel/tracing/events/ftrace/print/trigger
diff --git a/Documentation/translations/it_IT/process/howto.rst b/Documentation/translations/it_IT/process/howto.rst
index 16ad5622d549..15c08aea1dfe 100644
--- a/Documentation/translations/it_IT/process/howto.rst
+++ b/Documentation/translations/it_IT/process/howto.rst
@@ -394,7 +394,7 @@ trovati al sito:
Ci sono diversi archivi della lista di discussione. Usate un qualsiasi motore
di ricerca per trovarli. Per esempio:
- http://dir.gmane.org/gmane.linux.kernel
+ https://lore.kernel.org/lkml/
É caldamente consigliata una ricerca in questi archivi sul tema che volete
sollevare, prima di pubblicarlo sulla lista. Molte cose sono già state
diff --git a/Documentation/translations/ja_JP/howto.rst b/Documentation/translations/ja_JP/howto.rst
index 649e2ff2a407..b47a682d8ded 100644
--- a/Documentation/translations/ja_JP/howto.rst
+++ b/Documentation/translations/ja_JP/howto.rst
@@ -410,7 +410,7 @@ https://bugzilla.kernel.org に行ってください。もし今後のバグレ
このメーリングリストのアーカイブは web 上の多数の場所に存在します。こ
れらのアーカイブを探すにはサーチエンジンを使いましょう。例えば-
- http://dir.gmane.org/gmane.linux.kernel
+ https://lore.kernel.org/lkml/
リストに投稿する前にすでにその話題がアーカイブに存在するかどうかを検索
することを是非やってください。多数の事がすでに詳細に渡って議論されてお
diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst
index e43970584ca4..df53fafd1b10 100644
--- a/Documentation/translations/ko_KR/howto.rst
+++ b/Documentation/translations/ko_KR/howto.rst
@@ -386,7 +386,7 @@ https://bugzilla.kernel.org 를 체크하고자 할 수도 있다; 소수의 커
웹상의 많은 다른 곳에도 메일링 리스트의 아카이브들이 있다.
이러한 아카이브들을 찾으려면 검색 엔진을 사용하라. 예를 들어:
- http://dir.gmane.org/gmane.linux.kernel
+ https://lore.kernel.org/lkml/
여러분이 새로운 문제에 관해 리스트에 올리기 전에 말하고 싶은 주제에 관한
것을 아카이브에서 먼저 찾아보기를 강력히 권장한다. 이미 상세하게 토론된 많은
diff --git a/Documentation/translations/zh_CN/arch.rst b/Documentation/translations/zh_CN/arch.rst
new file mode 100644
index 000000000000..690e173d8b2a
--- /dev/null
+++ b/Documentation/translations/zh_CN/arch.rst
@@ -0,0 +1,29 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+处理器体系结构
+==============
+
+以下文档提供了具体架构实现的编程细节。
+
+.. toctree::
+ :maxdepth: 2
+
+ mips/index
+ arm64/index
+ riscv/index
+ openrisc/index
+ parisc/index
+ loongarch/index
+
+TODOList:
+
+* arm/index
+* ia64/index
+* m68k/index
+* nios2/index
+* powerpc/index
+* s390/index
+* sh/index
+* sparc/index
+* x86/index
+* xtensa/index
diff --git a/Documentation/translations/zh_CN/devicetree/changesets.rst b/Documentation/translations/zh_CN/devicetree/changesets.rst
index 2ace05f3c377..3df1b03c5695 100644
--- a/Documentation/translations/zh_CN/devicetree/changesets.rst
+++ b/Documentation/translations/zh_CN/devicetree/changesets.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/Devicetree/changesets.rst
+:Original: Documentation/devicetree/changesets.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst b/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst
index 115190341305..6dfd946d7093 100644
--- a/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst
+++ b/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/Devicetree/dynamic-resolution-notes.rst
+:Original: Documentation/devicetree/dynamic-resolution-notes.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/devicetree/kernel-api.rst b/Documentation/translations/zh_CN/devicetree/kernel-api.rst
index 6aa3b685494e..2fb729368b40 100644
--- a/Documentation/translations/zh_CN/devicetree/kernel-api.rst
+++ b/Documentation/translations/zh_CN/devicetree/kernel-api.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/Devicetree/kernel-api.rst
+:Original: Documentation/devicetree/kernel-api.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/devicetree/overlay-notes.rst b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
index 1bd482cb0a1b..43e3c0bc5a9f 100644
--- a/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
+++ b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: ../disclaimer-zh_CN.rst
-:Original: Documentation/Devicetree/overlay-notes.rst
+:Original: Documentation/devicetree/overlay-notes.rst
:翻译:
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index 2fc60e60feb4..ec99ef5fe990 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -26,165 +26,100 @@
顺便说下,中文文档也需要遵守内核编码风格,风格中中文和英文的主要不同就是中文
的字符标点占用两个英文字符宽度, 所以,当英文要求不要超过每行100个字符时,
中文就不要超过50个字符。另外,也要注意'-','=' 等符号与相关标题的对齐。在将
-补丁提交到社区之前,一定要进行必要的checkpatch.pl检查和编译测试。
+补丁提交到社区之前,一定要进行必要的 ``checkpatch.pl`` 检查和编译测试。
-许可证文档
-----------
-
-下面的文档介绍了Linux内核源代码的许可证(GPLv2)、如何在源代码树中正确标记
-单个文件的许可证、以及指向完整许可证文本的链接。
+与Linux 内核社区一起工作
+------------------------
-* Documentation/translations/zh_CN/process/license-rules.rst
-
-用户文档
---------
-
-下面的手册是为内核用户编写的——即那些试图让它在给定系统上以最佳方式工作的
-用户。
+与内核开发社区进行合作并将工作推向上游的基本指南。
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- admin-guide/index
-
-TODOList:
-
-* kbuild/index
+ process/development-process
+ process/submitting-patches
+ 行为准则 <process/code-of-conduct>
+ maintainer/index
+ 完整开发流程文档 <process/index>
-固件相关文档
-------------
+内部API文档
+-----------
-下列文档描述了内核需要的平台固件相关信息。
+开发人员使用的内核内部交互接口手册。
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- devicetree/index
+ core-api/index
+ driver-api/index
+ 内核中的锁 <locking/index>
TODOList:
-* firmware-guide/index
-
-应用程序开发人员文档
---------------------
-
-用户空间API手册涵盖了描述应用程序开发人员可见内核接口方面的文档。
+* subsystem-apis
-TODOlist:
+开发工具和流程
+--------------
-* userspace-api/index
-
-内核开发简介
-------------
-
-这些手册包含有关如何开发内核的整体信息。内核社区非常庞大,一年下来有数千名
-开发人员做出贡献。与任何大型社区一样,知道如何完成任务将使得更改合并的过程
-变得更加容易。
+为所有内核开发人员提供有用信息的各种其他手册。
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- process/index
- dev-tools/index
+ process/license-rules
doc-guide/index
+ dev-tools/index
+ dev-tools/testing-overview
kernel-hacking/index
- maintainer/index
TODOList:
* trace/index
* fault-injection/index
* livepatch/index
-* rust/index
-内核API文档
------------
+面向用户的文档
+--------------
-以下手册从内核开发人员的角度详细介绍了特定的内核子系统是如何工作的。这里的
-大部分信息都是直接从内核源代码获取的,并根据需要添加补充材料(或者至少是在
-我们设法添加的时候——可能不是所有的都是有需要的)。
+下列手册针对
+希望内核在给定系统上以最佳方式工作的*用户*,
+和查找内核用户空间API信息的程序开发人员。
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
- core-api/index
- driver-api/index
- locking/index
- accounting/index
- cpu-freq/index
- iio/index
- infiniband/index
- power/index
- virt/index
- sound/index
- filesystems/index
- scheduler/index
- mm/index
- peci/index
- PCI/index
+ admin-guide/index
+ admin-guide/reporting-issues.rst
TODOList:
-* block/index
-* cdrom/index
-* ide/index
-* fb/index
-* fpga/index
-* hid/index
-* i2c/index
-* isdn/index
-* leds/index
-* netlabel/index
-* networking/index
-* pcmcia/index
-* target/index
-* timers/index
-* spi/index
-* w1/index
-* watchdog/index
-* input/index
-* hwmon/index
-* gpu/index
-* security/index
-* crypto/index
-* bpf/index
-* usb/index
-* scsi/index
-* misc-devices/index
-* mhi/index
-
-体系结构无关文档
-----------------
+* 内核构建系统 <kbuild/index>
+* 用户空间工具 <tools/index>
+* userspace-api/index
-TODOList:
+也可参考独立于内核文档的 `Linux 手册页 <https://www.kernel.org/doc/man-pages/>`_ 。
-* asm-annotations
+固件相关文档
+------------
-特定体系结构文档
-----------------
+下列文档描述了内核需要的平台固件相关信息。
.. toctree::
:maxdepth: 2
- mips/index
- arm64/index
- riscv/index
- openrisc/index
- parisc/index
- loongarch/index
+ devicetree/index
TODOList:
-* arm/index
-* ia64/index
-* m68k/index
-* nios2/index
-* powerpc/index
-* s390/index
-* sh/index
-* sparc/index
-* x86/index
-* xtensa/index
+* firmware-guide/index
+
+体系结构文档
+------------
+
+.. toctree::
+ :maxdepth: 2
+
+ arch
其他文档
--------
@@ -195,9 +130,9 @@ TODOList:
TODOList:
* staging/index
-* watch_queue
-目录和表格
+
+索引和表格
----------
* :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/mm/ksm.rst b/Documentation/translations/zh_CN/mm/ksm.rst
index d1f82e857ad7..f0f458753d0c 100644
--- a/Documentation/translations/zh_CN/mm/ksm.rst
+++ b/Documentation/translations/zh_CN/mm/ksm.rst
@@ -30,7 +30,7 @@ KSM的用户空间的接口在Documentation/translations/zh_CN/admin-guide/mm/ks
KSM维护着稳定树中的KSM页的逆映射信息。
当KSM页面的共享数小于 ``max_page_sharing`` 的虚拟内存区域(VMAs)时,则代表了
-KSM页的稳定树其中的节点指向了一个rmap_item结构体类型的列表。同时,这个KSM页
+KSM页的稳定树其中的节点指向了一个ksm_rmap_item结构体类型的列表。同时,这个KSM页
的 ``page->mapping`` 指向了该稳定树节点。
如果共享数超过了阈值,KSM将给稳定树添加第二个维度。稳定树就变成链接一个或多
diff --git a/Documentation/translations/zh_CN/mm/page_owner.rst b/Documentation/translations/zh_CN/mm/page_owner.rst
index b7f81d7a6589..21a6a0837d42 100644
--- a/Documentation/translations/zh_CN/mm/page_owner.rst
+++ b/Documentation/translations/zh_CN/mm/page_owner.rst
@@ -74,15 +74,19 @@ page owner is disabled by default, so if you want to use it you
cat /sys/kernel/debug/page_owner > page_owner_full.txt
./page_owner_sort page_owner_full.txt sorted_page_owner.txt
-   The general output of ``page_owner_full.txt`` looks like the following (the output is not worth translating)::
+   The general output of ``page_owner_full.txt`` looks like the following::
Page allocated via order XXX, ...
PFN XXX ...
- // Detailed stack
+    // Stack details
Page allocated via order XXX, ...
PFN XXX ...
- // Detailed stack
+    // Stack details
+   By default, it starts at a given pfn and does a complete pfn dump, and page_owner supports fseek.
+
+ FILE *fp = fopen("/sys/kernel/debug/page_owner", "r");
+ fseek(fp, pfn_start, SEEK_SET);
The ``page_owner_sort`` tool ignores the ``PFN`` lines, puts the remaining lines into a buf, uses a regexp to
extract the page-order value, counts the occurrences and pages for each buf, and finally sorts them by the given parameters.
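
The two added lines above are only a fragment; a self-contained sketch of the same fseek usage (pfn_start here is a hypothetical placeholder, and error handling is minimal) could look like this:

	#include <stdio.h>

	int main(void)
	{
		char buf[4096];
		size_t n;
		long pfn_start = 0x10000;	/* hypothetical pfn to seek to */
		FILE *fp = fopen("/sys/kernel/debug/page_owner", "r");

		if (!fp)
			return 1;
		fseek(fp, pfn_start, SEEK_SET);	/* jump to the requested pfn */
		while ((n = fread(buf, 1, sizeof(buf), fp)) > 0)
			fwrite(buf, 1, n, stdout);	/* stream the records out */
		fclose(fp);
		return 0;
	}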
diff --git a/Documentation/translations/zh_CN/process/howto.rst b/Documentation/translations/zh_CN/process/howto.rst
index 1455190dc087..5bf953146929 100644
--- a/Documentation/translations/zh_CN/process/howto.rst
+++ b/Documentation/translations/zh_CN/process/howto.rst
@@ -306,7 +306,7 @@ bugzilla.kernel.org is the website Linux kernel developers use to track kernel
Archives of this mailing list are available in many places on the web. You can
use a search engine to find them. For example:
- http://dir.gmane.org/gmane.linux.kernel
+ https://lore.kernel.org/lkml/
Before posting, we strongly recommend that you first search the archives for the topic you want to
discuss. Many issues that have already been discussed in detail can only be found in the mailing-list archives.
diff --git a/Documentation/translations/zh_CN/process/index.rst b/Documentation/translations/zh_CN/process/index.rst
index a683dbea0c83..a1a35f88f4ae 100644
--- a/Documentation/translations/zh_CN/process/index.rst
+++ b/Documentation/translations/zh_CN/process/index.rst
@@ -10,6 +10,7 @@
.. _cn_process_index:
+=======================================
Working with the Linux kernel community
=======================================
diff --git a/Documentation/translations/zh_TW/process/howto.rst b/Documentation/translations/zh_TW/process/howto.rst
index 68ae4411285b..86b0d4c6d6f9 100644
--- a/Documentation/translations/zh_TW/process/howto.rst
+++ b/Documentation/translations/zh_TW/process/howto.rst
@@ -309,7 +309,7 @@ bugzilla.kernel.org is the website Linux kernel developers use to track kernel
Archives of this mailing list are available in many places on the web. You can
use a search engine to find them. For example:
- http://dir.gmane.org/gmane.linux.kernel
+ https://lore.kernel.org/lkml/
Before posting, we strongly recommend that you first search the archives for the topic you want to
discuss. Many issues that have already been discussed in detail can only be found in the mailing-list archives.
diff --git a/MAINTAINERS b/MAINTAINERS
index 5c6ce094e55e..92708912fef8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -752,7 +752,7 @@ ALIBABA PMU DRIVER
M: Shuai Xue <xueshuai@linux.alibaba.com>
S: Supported
F: Documentation/admin-guide/perf/alibaba_pmu.rst
-F: drivers/perf/alibaba_uncore_dwr_pmu.c
+F: drivers/perf/alibaba_uncore_drw_pmu.c
ALIENWARE WMI DRIVER
L: Dell.Client.Kernel@dell.com
@@ -4459,13 +4459,15 @@ M: Josef Bacik <josef@toxicpanda.com>
M: David Sterba <dsterba@suse.com>
L: linux-btrfs@vger.kernel.org
S: Maintained
-W: http://btrfs.wiki.kernel.org/
-Q: http://patchwork.kernel.org/project/linux-btrfs/list/
+W: https://btrfs.readthedocs.io
+W: https://btrfs.wiki.kernel.org/
+Q: https://patchwork.kernel.org/project/linux-btrfs/list/
C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
F: Documentation/filesystems/btrfs.rst
F: fs/btrfs/
F: include/linux/btrfs*
+F: include/trace/events/btrfs.h
F: include/uapi/linux/btrfs*
BTTV VIDEO4LINUX DRIVER
@@ -5266,6 +5268,7 @@ F: tools/testing/selftests/cgroup/
CONTROL GROUP - BLOCK IO CONTROLLER (BLKIO)
M: Tejun Heo <tj@kernel.org>
+M: Josef Bacik <josef@toxicpanda.com>
M: Jens Axboe <axboe@kernel.dk>
L: cgroups@vger.kernel.org
L: linux-block@vger.kernel.org
@@ -5273,6 +5276,7 @@ T: git git://git.kernel.dk/linux-block
F: Documentation/admin-guide/cgroup-v1/blkio-controller.rst
F: block/bfq-cgroup.c
F: block/blk-cgroup.c
+F: block/blk-iocost.c
F: block/blk-iolatency.c
F: block/blk-throttle.c
F: include/linux/blk-cgroup.h
@@ -15359,17 +15363,6 @@ L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/ulp/opa_vnic
-OPEN FIRMWARE AND DEVICE TREE OVERLAYS
-M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
-M: Frank Rowand <frowand.list@gmail.com>
-L: devicetree@vger.kernel.org
-S: Maintained
-F: Documentation/devicetree/dynamic-resolution-notes.rst
-F: Documentation/devicetree/overlay-notes.rst
-F: drivers/of/overlay.c
-F: drivers/of/resolver.c
-K: of_overlay_notifier_
-
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
@@ -15382,6 +15375,9 @@ F: Documentation/ABI/testing/sysfs-firmware-ofw
F: drivers/of/
F: include/linux/of*.h
F: scripts/dtc/
+K: of_overlay_notifier_
+K: of_overlay_fdt_apply
+K: of_overlay_remove
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
@@ -15419,7 +15415,7 @@ M: Stafford Horne <shorne@gmail.com>
L: openrisc@lists.librecores.org
S: Maintained
W: http://openrisc.io
-T: git git://github.com/openrisc/linux.git
+T: git https://github.com/openrisc/linux.git
F: Documentation/devicetree/bindings/openrisc/
F: Documentation/openrisc/
F: arch/openrisc/
@@ -17710,6 +17706,7 @@ M: Palmer Dabbelt <palmer@dabbelt.com>
M: Albert Ou <aou@eecs.berkeley.edu>
L: linux-riscv@lists.infradead.org
S: Supported
+Q: https://patchwork.kernel.org/project/linux-riscv/list/
P: Documentation/riscv/patch-acceptance.rst
T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
F: arch/riscv/
@@ -17721,12 +17718,13 @@ M: Conor Dooley <conor.dooley@microchip.com>
M: Daire McNamara <daire.mcnamara@microchip.com>
L: linux-riscv@lists.infradead.org
S: Supported
-F: Documentation/devicetree/bindings/clock/microchip,mpfs.yaml
+F: Documentation/devicetree/bindings/clock/microchip,mpfs*.yaml
F: Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
F: Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml
F: Documentation/devicetree/bindings/mailbox/microchip,mpfs-mailbox.yaml
F: Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml
F: Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
+F: Documentation/devicetree/bindings/riscv/microchip.yaml
F: Documentation/devicetree/bindings/soc/microchip/microchip,mpfs-sys-controller.yaml
F: Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
@@ -18217,7 +18215,6 @@ F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER
M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-M: Krzysztof Opasiak <k.opasiak@samsung.com>
L: linux-nfc@lists.01.org (subscribers-only)
S: Maintained
F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
@@ -18523,8 +18520,7 @@ S: Maintained
F: drivers/mmc/host/sdhci-esdhc-imx.c
SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
-M: Jonathan Derrick <jonathan.derrick@intel.com>
-M: Revanth Rajashekar <revanth.rajashekar@intel.com>
+M: Jonathan Derrick <jonathan.derrick@linux.dev>
L: linux-block@vger.kernel.org
S: Supported
F: block/opal_proto.h
@@ -21300,7 +21296,7 @@ L: linux-usb@vger.kernel.org
L: netdev@vger.kernel.org
S: Maintained
W: https://github.com/petkan/pegasus
-T: git git://github.com/petkan/pegasus.git
+T: git https://github.com/petkan/pegasus.git
F: drivers/net/usb/pegasus.*
USB PHY LAYER
@@ -21337,7 +21333,7 @@ L: linux-usb@vger.kernel.org
L: netdev@vger.kernel.org
S: Maintained
W: https://github.com/petkan/rtl8150
-T: git git://github.com/petkan/rtl8150.git
+T: git https://github.com/petkan/rtl8150.git
F: drivers/net/usb/rtl8150.c
USB SERIAL SUBSYSTEM
diff --git a/Makefile b/Makefile
index cfbe6a7de640..f41ec8c8426b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 0
+PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -1979,6 +1979,8 @@ endif
single-goals := $(addprefix $(build-dir)/, $(single-no-ko))
+KBUILD_MODULES := 1
+
endif
# Preset locale variables to speed up the build process. Limit locale
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 6d0b3baf97ff..e9348aec4649 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -803,7 +803,7 @@ void __iomem *marvel_ioportmap (unsigned long addr)
return (void __iomem *)addr;
}
-unsigned u8
+u8
marvel_ioread8(const void __iomem *xaddr)
{
unsigned long addr = (unsigned long) xaddr;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fc30df88ffbe..a2b31d91a1b6 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -371,7 +371,7 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
slots = ((last - first) >> PAGE_SHIFT) + 1;
- offset = get_random_int() % slots;
+ offset = prandom_u32_max(slots);
addr = first + (offset << PAGE_SHIFT);
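
This hunk is the first of many in this merge that replace open-coded masking of get_random_int() with prandom_u32_max(). A minimal sketch of why the two forms are equivalent here, assuming PAGE_SIZE is a power of two (which it always is):

	/*
	 * Sketch only: both expressions produce a value in [0, PAGE_SIZE).
	 * ~PAGE_MASK equals PAGE_SIZE - 1, so the mask keeps the low bits;
	 * prandom_u32_max() additionally works for non-power-of-two bounds.
	 */
	unsigned long off_old = get_random_int() & ~PAGE_MASK;
	unsigned long off_new = prandom_u32_max(PAGE_SIZE);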
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index ea128e32e8ca..e07f359254c3 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -655,7 +655,7 @@ struct page *get_signal_page(void)
PAGE_SIZE / sizeof(u32));
/* Give the signal return code some randomness */
- offset = 0x200 + (get_random_int() & 0x7fc);
+ offset = 0x200 + (get_random_u16() & 0x7fc);
signal_return_offset = offset;
/* Copy signal return handlers into the page */
diff --git a/arch/arm/mach-mmp/devices.c b/arch/arm/mach-mmp/devices.c
index 79f4a2aa5475..9968239d8041 100644
--- a/arch/arm/mach-mmp/devices.c
+++ b/arch/arm/mach-mmp/devices.c
@@ -238,7 +238,7 @@ void pxa_usb_phy_deinit(void __iomem *phy_reg)
static u64 __maybe_unused usb_dma_mask = ~(u32)0;
#if IS_ENABLED(CONFIG_PHY_PXA_USB)
-struct resource pxa168_usb_phy_resources[] = {
+static struct resource pxa168_usb_phy_resources[] = {
[0] = {
.start = PXA168_U2O_PHYBASE,
.end = PXA168_U2O_PHYBASE + USB_PHY_RANGE,
@@ -259,7 +259,7 @@ struct platform_device pxa168_device_usb_phy = {
#endif /* CONFIG_PHY_PXA_USB */
#if IS_ENABLED(CONFIG_USB_MV_UDC)
-struct resource pxa168_u2o_resources[] = {
+static struct resource pxa168_u2o_resources[] = {
/* regbase */
[0] = {
.start = PXA168_U2O_REGBASE + U2x_CAPREGS_OFFSET,
@@ -294,7 +294,7 @@ struct platform_device pxa168_device_u2o = {
#endif /* CONFIG_USB_MV_UDC */
#if IS_ENABLED(CONFIG_USB_EHCI_MV_U2O)
-struct resource pxa168_u2oehci_resources[] = {
+static struct resource pxa168_u2oehci_resources[] = {
[0] = {
.start = PXA168_U2O_REGBASE,
.end = PXA168_U2O_REGBASE + USB_REG_RANGE,
@@ -321,7 +321,7 @@ struct platform_device pxa168_device_u2oehci = {
#endif
#if IS_ENABLED(CONFIG_USB_MV_OTG)
-struct resource pxa168_u2ootg_resources[] = {
+static struct resource pxa168_u2ootg_resources[] = {
/* regbase */
[0] = {
.start = PXA168_U2O_REGBASE + U2x_CAPREGS_OFFSET,
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index 43b7996ab754..9e36920d4cfd 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -25,11 +25,8 @@ extern struct pl022_ssp_controller pl022_plat_data;
extern struct pl08x_platform_data pl080_plat_data;
void __init spear_setup_of_timer(void);
-void __init spear3xx_clk_init(void __iomem *misc_base,
- void __iomem *soc_config_base);
void __init spear3xx_map_io(void);
void __init spear3xx_dt_init_irq(void);
-void __init spear6xx_clk_init(void __iomem *misc_base);
void __init spear13xx_map_io(void);
void __init spear13xx_l2x0_init(void);
diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c
index 2ba406e92c41..7ef9670d3029 100644
--- a/arch/arm/mach-spear/spear3xx.c
+++ b/arch/arm/mach-spear/spear3xx.c
@@ -13,6 +13,7 @@
#include <linux/amba/pl022.h>
#include <linux/amba/pl080.h>
#include <linux/clk.h>
+#include <linux/clk/spear.h>
#include <linux/io.h>
#include <asm/mach/map.h>
#include "pl080.h"
diff --git a/arch/arm/mach-spear/spear6xx.c b/arch/arm/mach-spear/spear6xx.c
index 58183493e06d..f0a1e704cceb 100644
--- a/arch/arm/mach-spear/spear6xx.c
+++ b/arch/arm/mach-spear/spear6xx.c
@@ -12,6 +12,7 @@
#include <linux/amba/pl08x.h>
#include <linux/clk.h>
+#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -339,7 +340,7 @@ static struct pl08x_platform_data spear6xx_pl080_plat_data = {
* 0xD0000000 0xFD000000
* 0xFC000000 0xFC000000
*/
-struct map_desc spear6xx_io_desc[] __initdata = {
+static struct map_desc spear6xx_io_desc[] __initdata = {
{
.virtual = (unsigned long)VA_SPEAR6XX_ML_CPU_BASE,
.pfn = __phys_to_pfn(SPEAR_ICM3_ML1_2_BASE),
@@ -359,12 +360,12 @@ struct map_desc spear6xx_io_desc[] __initdata = {
};
/* This will create static memory mapping for selected devices */
-void __init spear6xx_map_io(void)
+static void __init spear6xx_map_io(void)
{
iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
}
-void __init spear6xx_timer_init(void)
+static void __init spear6xx_timer_init(void)
{
char pclk_name[] = "pll3_clk";
struct clk *gpt_clk, *pclk;
@@ -394,7 +395,7 @@ void __init spear6xx_timer_init(void)
}
/* Add auxdata to pass platform data */
-struct of_dev_auxdata spear6xx_auxdata_lookup[] __initdata = {
+static struct of_dev_auxdata spear6xx_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("arm,pl080", SPEAR_ICM3_DMA_BASE, NULL,
&spear6xx_pl080_plat_data),
{}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f6737d2f37b2..505c8a1ccbe0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -632,6 +632,23 @@ config ARM64_ERRATUM_1530923
config ARM64_WORKAROUND_REPEAT_TLBI
bool
+config ARM64_ERRATUM_2441007
+ bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
+ default y
+ select ARM64_WORKAROUND_REPEAT_TLBI
+ help
+ This option adds a workaround for ARM Cortex-A55 erratum #2441007.
+
+ Under very rare circumstances, affected Cortex-A55 CPUs
+ may not handle a race between a break-before-make sequence on one
+ CPU, and another CPU accessing the same page. This could allow a
+ store to a page that has been unmapped.
+
+ Work around this by adding the affected CPUs to the list that needs
+ TLB sequences to be done twice.
+
+ If unsure, say Y.
+
config ARM64_ERRATUM_1286807
bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
default y
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 8aa0d276a636..abc418650fec 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -60,6 +60,7 @@
#define ARM_CPU_IMP_FUJITSU 0x46
#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_IMP_APPLE 0x61
+#define ARM_CPU_IMP_AMPERE 0xC0
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -123,6 +124,8 @@
#define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
#define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029
+#define AMPERE_CPU_PART_AMPERE1 0xAC3
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -172,6 +175,7 @@
#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 58ca4f6b25d6..89ac00084f38 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -230,6 +230,11 @@ static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2441007
+ {
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
{
/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index aca88470fb69..7467217c1eaf 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -48,7 +48,12 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
if (!pte_is_tagged)
return;
- mte_clear_page_tags(page_address(page));
+ /*
+ * Test PG_mte_tagged again in case it was racing with another
+ * set_pte_at().
+ */
+ if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ mte_clear_page_tags(page_address(page));
}
void mte_sync_tags(pte_t old_pte, pte_t pte)
@@ -64,7 +69,7 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
- if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ if (!test_bit(PG_mte_tagged, &page->flags))
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
}
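
Taken together, the two mte.c hunks form a double-checked pattern: the per-page loop now does a cheap, non-atomic test_bit(), and the atomic test_and_set_bit() moves next to the tag clearing so that only one of two set_pte_at() callers racing on the same page initialises its tags. A condensed sketch of the resulting flow (not the literal code):

	if (!test_bit(PG_mte_tagged, &page->flags)) {	/* cheap first check */
		/* swap/zero checks elided */
		if (!test_and_set_bit(PG_mte_tagged, &page->flags))
			mte_clear_page_tags(page_address(page));	/* winner clears once */
	}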
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9015f49c206e..044a7d7f1f6a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -591,7 +591,7 @@ unsigned long __get_wchan(struct task_struct *p)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & ~0xf;
}
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index a8ea1637b137..bfce41c2a53b 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -868,6 +868,10 @@ u8 spectre_bhb_loop_affected(int scope)
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
{},
};
+ static const struct midr_range spectre_bhb_k11_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ {},
+ };
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -878,6 +882,8 @@ u8 spectre_bhb_loop_affected(int scope)
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
+ else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
+ k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 733451fe7e41..d72e8f23422d 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -67,7 +67,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
*
* The resulting 5 bits of entropy is seen in SP[8:4].
*/
- choose_random_kstack_offset(get_random_int() & 0x1FF);
+ choose_random_kstack_offset(get_random_u16() & 0x1FF);
}
static inline bool has_syscall_work(unsigned long flags)
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index 4334dec93bd4..bed803d8e158 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -53,7 +53,12 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
if (!tags)
return false;
- mte_restore_page_tags(page_address(page), tags);
+ /*
+ * Test PG_mte_tagged again in case it was racing with another
+ * set_pte_at().
+ */
+ if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ mte_restore_page_tags(page_address(page), tags);
return true;
}
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 7f1fb36f208c..384757a7eda9 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -732,7 +732,7 @@ EndSysreg
Sysreg SCTLR_EL1 3 0 1 0 0
Field 63 TIDCP
-Field 62 SPINMASK
+Field 62 SPINTMASK
Field 61 NMI
Field 60 EnTP2
Res0 59:58
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 8ea57e2f0e04..946704bee599 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -412,6 +412,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
__update_tlb(vma, address, ptep);
}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 660492f064e7..1256e3582475 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -293,7 +293,7 @@ unsigned long stack_top(void)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & STACK_ALIGN;
}
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index f32c38abd791..8c9826062652 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -78,7 +78,7 @@ static unsigned long vdso_base(void)
unsigned long base = STACK_TOP;
if (current->flags & PF_RANDOMIZE) {
- base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+ base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
base = PAGE_ALIGN(base);
}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 35b912bce429..bbe9ce471791 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -711,7 +711,7 @@ unsigned long mips_stack_top(void)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & ALMASK;
}
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index b2cc2c2dd4bf..5fd9bf1d596c 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -79,7 +79,7 @@ static unsigned long vdso_base(void)
}
if (current->flags & PF_RANDOMIZE) {
- base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+ base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
base = PAGE_ALIGN(base);
}
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index a82b2caaa560..b3edbb33b621 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -74,10 +74,10 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size)
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
*/
- mmap_read_lock(&init_mm);
- error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
- NULL);
- mmap_read_unlock(&init_mm);
+ mmap_write_lock(&init_mm);
+ error = walk_page_range_novma(&init_mm, va, va + size,
+ &set_nocache_walk_ops, NULL, NULL);
+ mmap_write_unlock(&init_mm);
if (error)
return ERR_PTR(error);
@@ -88,11 +88,11 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
unsigned long va = (unsigned long)cpu_addr;
- mmap_read_lock(&init_mm);
+ mmap_write_lock(&init_mm);
/* walk_page_range shouldn't be able to fail here */
- WARN_ON(walk_page_range(&init_mm, va, va + size,
- &clear_nocache_walk_ops, NULL));
- mmap_read_unlock(&init_mm);
+ WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
+ &clear_nocache_walk_ops, NULL, NULL));
+ mmap_write_unlock(&init_mm);
}
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
index 0ec54f43d6d2..1ed45fd085d3 100644
--- a/arch/parisc/include/asm/alternative.h
+++ b/arch/parisc/include/asm/alternative.h
@@ -22,10 +22,10 @@
struct alt_instr {
s32 orig_offset; /* offset to original instructions */
- s32 len; /* end of original instructions */
- u32 cond; /* see ALT_COND_XXX */
+ s16 len; /* end of original instructions */
+ u16 cond; /* see ALT_COND_XXX */
u32 replacement; /* replacement instruction or code */
-};
+} __packed;
void set_kernel_text_rw(int enable_read_write);
void apply_alternatives_all(void);
@@ -35,8 +35,9 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* Alternative SMP implementation. */
#define ALTERNATIVE(cond, replacement) "!0:" \
".section .altinstructions, \"aw\" !" \
- ".word (0b-4-.), 1, " __stringify(cond) "," \
- __stringify(replacement) " !" \
+ ".word (0b-4-.) !" \
+ ".hword 1, " __stringify(cond) " !" \
+ ".word " __stringify(replacement) " !" \
".previous"
#else
@@ -44,15 +45,17 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* to replace one single instructions by a new instruction */
#define ALTERNATIVE(from, to, cond, replacement)\
.section .altinstructions, "aw" ! \
- .word (from - .), (to - from)/4 ! \
- .word cond, replacement ! \
+ .word (from - .) ! \
+ .hword (to - from)/4, cond ! \
+ .word replacement ! \
.previous
/* to replace multiple instructions by new code */
#define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
.section .altinstructions, "aw" ! \
- .word (from - .), -num_instructions ! \
- .word cond, (new_instr_ptr - .) ! \
+ .word (from - .) ! \
+ .hword -num_instructions, cond ! \
+ .word (new_instr_ptr - .) ! \
.previous
#endif /* __ASSEMBLY__ */
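
Narrowing len and cond to 16 bits shrinks each alternatives-table entry from 16 to 12 bytes, which is why the assembler templates above switch from four .word values to a .word, two .hword values, and a final .word. A hedged compile-time check of the intended layout (the struct name here is illustrative; the real one is alt_instr):

	#include <linux/types.h>
	#include <linux/build_bug.h>

	struct alt_instr_sketch {
		s32 orig_offset;	/* offset to original instructions */
		s16 len;		/* end of original instructions */
		u16 cond;		/* see ALT_COND_XXX */
		u32 replacement;	/* replacement instruction or code */
	} __packed;

	static_assert(sizeof(struct alt_instr_sketch) == 12);	/* was 16 */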
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index b643092d4b98..fcbcf9a96c11 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -19,9 +19,6 @@ extern unsigned long parisc_pat_pdc_cap; /* PDC capabilities (PAT) */
#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
-void pdc_console_init(void); /* in pdc_console.c */
-void pdc_console_restart(void);
-
void setup_pdc(void); /* in inventory.c */
/* wrapper-functions from pdc.c */
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index df7b931865d2..ecd028854469 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -192,6 +192,11 @@ extern void __update_cache(pte_t pte);
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
+#ifdef CONFIG_HUGETLB_PAGE
+#define _PAGE_SPECIAL_BIT _PAGE_DMB_BIT /* DMB feature is currently unused */
+#else
+#define _PAGE_SPECIAL_BIT _PAGE_HPAGE_BIT /* use unused HUGE PAGE bit */
+#endif
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/* following macro is ok for both 32 and 64 bit. */
@@ -219,7 +224,7 @@ extern void __update_cache(pte_t pte);
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
-#define _PAGE_SPECIAL (_PAGE_DMB)
+#define _PAGE_SPECIAL (1 << xlate_pabit(_PAGE_SPECIAL_BIT))
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
index daa1e9047275..66f5672c70bd 100644
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -26,7 +26,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *entry;
int index = 0, applied = 0;
int num_cpus = num_online_cpus();
- u32 cond_check;
+ u16 cond_check;
cond_check = ALT_COND_ALWAYS |
((num_cpus == 1) ? ALT_COND_NO_SMP : 0) |
@@ -45,8 +45,9 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
for (entry = start; entry < end; entry++, index++) {
- u32 *from, cond, replacement;
- s32 len;
+ u32 *from, replacement;
+ u16 cond;
+ s16 len;
from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
len = entry->len;
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index df8102fb435f..0e5ebfe8d9d2 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -499,6 +499,10 @@
* Finally, _PAGE_READ goes in the top bit of PL1 (so we
* trigger an access rights trap in user space if the user
* tries to read an unreadable page */
+#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+ /* need to drop DMB bit, as it's used as SPECIAL flag */
+ depi 0,_PAGE_SPECIAL_BIT,1,\pte
+#endif
depd \pte,8,7,\prot
/* PAGE_USER indicates the page can be read with user privileges,
@@ -529,6 +533,10 @@
* makes the tlb entry for the differently formatted pa11
* insertion instructions */
.macro make_insert_tlb_11 spc,pte,prot
+#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+ /* need to drop DMB bit, as it's used as SPECIAL flag */
+ depi 0,_PAGE_SPECIAL_BIT,1,\pte
+#endif
zdep \spc,30,15,\prot
dep \pte,8,7,\prot
extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 2661cdd256ae..7d0989f523d0 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -1,46 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * PDC Console support - ie use firmware to dump text via boot console
+ * PDC early console support - use PDC firmware to dump text via boot console
*
- * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
- * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
- * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
- * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
- * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
- * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
- * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
- * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
- * Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
- * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
- * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
- * Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
+ * Copyright (C) 2001-2022 Helge Deller <deller@gmx.de>
*/
-/*
- * The PDC console is a simple console, which can be used for debugging
- * boot related problems on HP PA-RISC machines. It is also useful when no
- * other console works.
- *
- * This code uses the ROM (=PDC) based functions to read and write characters
- * from and to PDC's boot path.
- */
-
-/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
- * On production kernels EARLY_BOOTUP_DEBUG should be undefined. */
-#define EARLY_BOOTUP_DEBUG
-
-
-#include <linux/kernel.h>
#include <linux/console.h>
-#include <linux/string.h>
#include <linux/init.h>
-#include <linux/major.h>
-#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/kgdb.h>
#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for iodc_call() proto and friends */
static DEFINE_SPINLOCK(pdc_console_lock);
-static struct console pdc_cons;
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
@@ -54,7 +26,8 @@ static void pdc_console_write(struct console *co, const char *s, unsigned count)
spin_unlock_irqrestore(&pdc_console_lock, flags);
}
-int pdc_console_poll_key(struct console *co)
+#ifdef CONFIG_KGDB
+static int kgdb_pdc_read_char(void)
{
int c;
unsigned long flags;
@@ -63,201 +36,40 @@ int pdc_console_poll_key(struct console *co)
c = pdc_iodc_getc();
spin_unlock_irqrestore(&pdc_console_lock, flags);
- return c;
-}
-
-static int pdc_console_setup(struct console *co, char *options)
-{
- return 0;
-}
-
-#if defined(CONFIG_PDC_CONSOLE)
-#include <linux/vt_kern.h>
-#include <linux/tty_flip.h>
-
-#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
-
-static void pdc_console_poll(struct timer_list *unused);
-static DEFINE_TIMER(pdc_console_timer, pdc_console_poll);
-static struct tty_port tty_port;
-
-static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
-{
- tty_port_tty_set(&tty_port, tty);
- mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
-
- return 0;
+ return (c <= 0) ? NO_POLL_CHAR : c;
}
-static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
+static void kgdb_pdc_write_char(u8 chr)
{
- if (tty->count == 1) {
- del_timer_sync(&pdc_console_timer);
- tty_port_tty_set(&tty_port, NULL);
- }
+ if (PAGE0->mem_cons.cl_class != CL_DUPLEX)
+ pdc_console_write(NULL, &chr, 1);
}
-static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- pdc_console_write(NULL, buf, count);
- return count;
-}
-
-static unsigned int pdc_console_tty_write_room(struct tty_struct *tty)
-{
- return 32768; /* no limit, no buffer used */
-}
-
-static const struct tty_operations pdc_console_tty_ops = {
- .open = pdc_console_tty_open,
- .close = pdc_console_tty_close,
- .write = pdc_console_tty_write,
- .write_room = pdc_console_tty_write_room,
+static struct kgdb_io kgdb_pdc_io_ops = {
+ .name = "kgdb_pdc",
+ .read_char = kgdb_pdc_read_char,
+ .write_char = kgdb_pdc_write_char,
};
-
-static void pdc_console_poll(struct timer_list *unused)
-{
- int data, count = 0;
-
- while (1) {
- data = pdc_console_poll_key(NULL);
- if (data == -1)
- break;
- tty_insert_flip_char(&tty_port, data & 0xFF, TTY_NORMAL);
- count ++;
- }
-
- if (count)
- tty_flip_buffer_push(&tty_port);
-
- if (pdc_cons.flags & CON_ENABLED)
- mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
-}
-
-static struct tty_driver *pdc_console_tty_driver;
-
-static int __init pdc_console_tty_driver_init(void)
-{
- struct tty_driver *driver;
- int err;
-
- /* Check if the console driver is still registered.
- * It is unregistered if the pdc console was not selected as the
- * primary console. */
-
- struct console *tmp;
-
- console_lock();
- for_each_console(tmp)
- if (tmp == &pdc_cons)
- break;
- console_unlock();
-
- if (!tmp) {
- printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
- return -ENODEV;
- }
-
- printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
- pdc_cons.flags &= ~CON_BOOT;
-
- driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_RESET_TERMIOS);
- if (IS_ERR(driver))
- return PTR_ERR(driver);
-
- tty_port_init(&tty_port);
-
- driver->driver_name = "pdc_cons";
- driver->name = "ttyB";
- driver->major = MUX_MAJOR;
- driver->minor_start = 0;
- driver->type = TTY_DRIVER_TYPE_SYSTEM;
- driver->init_termios = tty_std_termios;
- tty_set_operations(driver, &pdc_console_tty_ops);
- tty_port_link_device(&tty_port, driver, 0);
-
- err = tty_register_driver(driver);
- if (err) {
- printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
- tty_port_destroy(&tty_port);
- tty_driver_kref_put(driver);
- return err;
- }
-
- pdc_console_tty_driver = driver;
-
- return 0;
-}
-device_initcall(pdc_console_tty_driver_init);
-
-static struct tty_driver * pdc_console_device (struct console *c, int *index)
-{
- *index = c->index;
- return pdc_console_tty_driver;
-}
-#else
-#define pdc_console_device NULL
#endif
-static struct console pdc_cons = {
- .name = "ttyB",
- .write = pdc_console_write,
- .device = pdc_console_device,
- .setup = pdc_console_setup,
- .flags = CON_BOOT | CON_PRINTBUFFER,
- .index = -1,
-};
-
-static int pdc_console_initialized;
-
-static void pdc_console_init_force(void)
+static int __init pdc_earlycon_setup(struct earlycon_device *device,
+ const char *opt)
{
- if (pdc_console_initialized)
- return;
- ++pdc_console_initialized;
-
+ struct console *earlycon_console;
+
/* If the console is duplex then copy the COUT parameters to CIN. */
if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons));
- /* register the pdc console */
- register_console(&pdc_cons);
-}
+ earlycon_console = device->con;
+ earlycon_console->write = pdc_console_write;
+ device->port.iotype = UPIO_MEM32BE;
-void __init pdc_console_init(void)
-{
-#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE)
- pdc_console_init_force();
+#ifdef CONFIG_KGDB
+ kgdb_register_io_module(&kgdb_pdc_io_ops);
#endif
-#ifdef EARLY_BOOTUP_DEBUG
- printk(KERN_INFO "Initialized PDC Console for debugging.\n");
-#endif
-}
-
-
-/*
- * Used for emergencies. Currently only used if an HPMC occurs. If an
- * HPMC occurs, it is possible that the current console may not be
- * properly initialised after the PDC IO reset. This routine unregisters
- * all of the current consoles, reinitializes the pdc console and
- * registers it.
- */
-
-void pdc_console_restart(void)
-{
- struct console *console;
-
- if (pdc_console_initialized)
- return;
- /* If we've already seen the output, don't bother to print it again */
- if (console_drivers != NULL)
- pdc_cons.flags &= ~CON_PRINTBUFFER;
-
- while ((console = console_drivers) != NULL)
- unregister_console(console_drivers);
-
- /* force registering the pdc console */
- pdc_console_init_force();
+ return 0;
}
+
+EARLYCON_DECLARE(pdc, pdc_earlycon_setup);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 3db0e97e6c06..c4f8374c7018 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -284,7 +284,7 @@ __get_wchan(struct task_struct *p)
static inline unsigned long brk_rnd(void)
{
- return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
+ return (get_random_u32() & BRK_RND_MASK) << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index f005ddedb50e..375f38d6e1a4 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -70,6 +70,10 @@ void __init setup_cmdline(char **cmdline_p)
strlcat(p, "tty0", COMMAND_LINE_SIZE);
}
+ /* default to use early console */
+ if (!strstr(p, "earlycon"))
+ strlcat(p, " earlycon=pdc", COMMAND_LINE_SIZE);
+
#ifdef CONFIG_BLK_DEV_INITRD
if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
{
@@ -139,8 +143,6 @@ void __init setup_arch(char **cmdline_p)
if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
panic("KERNEL_INITIAL_ORDER too small!");
- pdc_console_init();
-
#ifdef CONFIG_64BIT
if(parisc_narrow_firmware) {
printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 2b34294517a1..848b0702005d 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -239,14 +239,14 @@ static unsigned long mmap_rnd(void)
unsigned long rnd = 0;
if (current->flags & PF_RANDOMIZE)
- rnd = get_random_int() & MMAP_RND_MASK;
+ rnd = get_random_u32() & MMAP_RND_MASK;
return rnd << PAGE_SHIFT;
}
unsigned long arch_mmap_rnd(void)
{
- return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
+ return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}
static unsigned long mmap_legacy_base(void)
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b78f1b9d45c1..f9696fbf646c 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -239,13 +239,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
- /* maybe the kernel hasn't booted very far yet and hasn't been able
- * to initialize the serial or STI console. In that case we should
- * re-enable the pdc console, so that the user will be able to
- * identify the problem. */
- if (!console_drivers)
- pdc_console_restart();
-
if (err)
printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
current->comm, task_pid_nr(current), str, err);
@@ -429,10 +422,6 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
- /* restart pdc console if necessary */
- if (!console_drivers)
- pdc_console_restart();
-
/* Not all paths will gutter the processor... */
switch(code){
@@ -482,9 +471,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
unsigned long fault_space = 0;
int si_code;
- if (code == 1)
- pdc_console_restart(); /* switch back to pdc if HPMC */
- else if (!irqs_disabled_flags(regs->gr[0]))
+ if (!irqs_disabled_flags(regs->gr[0]))
local_irq_enable();
/* Security check:
diff --git a/arch/parisc/kernel/vdso.c b/arch/parisc/kernel/vdso.c
index 63dc44c4c246..47e5960a2f96 100644
--- a/arch/parisc/kernel/vdso.c
+++ b/arch/parisc/kernel/vdso.c
@@ -75,7 +75,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
map_base = mm->mmap_base;
if (current->flags & PF_RANDOMIZE)
- map_base -= (get_random_int() & 0x1f) * PAGE_SIZE;
+ map_base -= prandom_u32_max(0x20) * PAGE_SIZE;
vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
index c1c1ef9457fb..273c527868db 100644
--- a/arch/powerpc/crypto/crc-vpmsum_test.c
+++ b/arch/powerpc/crypto/crc-vpmsum_test.c
@@ -82,7 +82,7 @@ static int __init crc_test_init(void)
if (len <= offset)
continue;
- prandom_bytes(data, len);
+ get_random_bytes(data, len);
len -= offset;
crypto_shash_update(crct10dif_shash, data+offset, len);
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index 9840d572da55..a1142496cd58 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -89,6 +89,22 @@ long compat_sys_rt_sigreturn(void);
* responsible for combining parameter pairs.
*/
+#ifdef CONFIG_PPC32
+long sys_ppc_pread64(unsigned int fd,
+ char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_pwrite64(unsigned int fd,
+ const char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_readahead(int fd, u32 r4,
+ u32 offset1, u32 offset2, u32 count);
+long sys_ppc_truncate64(const char __user *path, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
+ size_t len, int advice);
+#endif
#ifdef CONFIG_COMPAT
long compat_sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ee2d76cb3187..9b6146056e48 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -73,6 +73,7 @@ obj-y := cputable.o syscalls.o \
obj-y += ptrace/
obj-$(CONFIG_PPC64) += setup_64.o irq_64.o\
paca.o nvram_64.o note.o
+obj-$(CONFIG_PPC32) += sys_ppc32.o
obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index 904a5608cbe3..978a173eb339 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -538,7 +538,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
beq .Lfast_kernel_interrupt_return_\srr\() // EE already disabled
lbz r11,PACAIRQHAPPENED(r13)
andi. r10,r11,PACA_IRQ_MUST_HARD_MASK
- beq 1f // No HARD_MASK pending
+ beq .Lfast_kernel_interrupt_return_\srr\() // No HARD_MASK pending
/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
@@ -555,12 +555,23 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
b .Lfast_kernel_interrupt_return_\srr\()
.Linterrupt_return_\srr\()_soft_enabled:
+ /*
+ * In the soft-enabled case, need to double-check that we have no
+ * pending interrupts that might have come in before we reached the
+ * restart section of code, and restart the exit so those can be
+ * handled.
+ *
+	 * If there are none, it is possible that the interrupt still
+ * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
+ * interrupted context. This clear will not clobber a new pending
+ * interrupt coming in, because we're in the restart section, so
+ * such would return to the restart location.
+ */
#ifdef CONFIG_PPC_BOOK3S
lbz r11,PACAIRQHAPPENED(r13)
andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
bne- interrupt_return_\srr\()_kernel_restart
#endif
-1:
li r11,0
stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 40834ef84f0c..67da147fe34d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2303,6 +2303,6 @@ void notrace __ppc64_runlatch_off(void)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & ~0xf;
}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index dcc3c9fd4cfd..1ab4a4d95aba 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -1,13 +1,23 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * sys_ppc32.c: Conversion between 32bit and 64bit native syscalls.
+ * sys_ppc32.c: 32-bit system calls with complex calling conventions.
*
* Copyright (C) 2001 IBM
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*
- * These routines maintain argument size conversion between 32bit and 64bit
- * environment.
+ * 32-bit system calls with 64-bit arguments pass those in register pairs.
+ * This must be specially dealt with on 64-bit kernels. The compat_arg_u64_dual
+ * in generic compat syscalls is not always usable because the register
+ * pairing is constrained depending on preceding arguments.
+ *
+ * An analogous problem exists on 32-bit kernels with ARCH_HAS_SYSCALL_WRAPPER,
+ * the defined system call functions take the pt_regs as an argument, and there
+ * is a mapping macro which maps registers to arguments
+ * (SC_POWERPC_REGS_TO_ARGS) which also does not deal with these 64-bit
+ * arguments.
+ *
+ * This file contains these system calls.
*/
#include <linux/kernel.h>
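
The register-pair convention this new comment describes ultimately reduces to stitching two 32-bit halves back into one 64-bit value. A sketch of the kind of helper these wrappers rely on (the in-tree merge_64 may differ in detail):

	/*
	 * Sketch: recombine a 64-bit syscall argument passed as two 32-bit
	 * register halves, high word first. Illustrative only; see the
	 * in-tree merge_64 for the real definition.
	 */
	static inline u64 merge_64(u32 high, u32 low)
	{
		return ((u64)high << 32) | low;
	}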
@@ -47,7 +57,17 @@
#include <asm/syscalls.h>
#include <asm/switch_to.h>
-COMPAT_SYSCALL_DEFINE6(ppc_pread64,
+#ifdef CONFIG_PPC32
+#define PPC32_SYSCALL_DEFINE4 SYSCALL_DEFINE4
+#define PPC32_SYSCALL_DEFINE5 SYSCALL_DEFINE5
+#define PPC32_SYSCALL_DEFINE6 SYSCALL_DEFINE6
+#else
+#define PPC32_SYSCALL_DEFINE4 COMPAT_SYSCALL_DEFINE4
+#define PPC32_SYSCALL_DEFINE5 COMPAT_SYSCALL_DEFINE5
+#define PPC32_SYSCALL_DEFINE6 COMPAT_SYSCALL_DEFINE6
+#endif
+
+PPC32_SYSCALL_DEFINE6(ppc_pread64,
unsigned int, fd,
char __user *, ubuf, compat_size_t, count,
u32, reg6, u32, pos1, u32, pos2)
@@ -55,7 +75,7 @@ COMPAT_SYSCALL_DEFINE6(ppc_pread64,
return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2));
}
-COMPAT_SYSCALL_DEFINE6(ppc_pwrite64,
+PPC32_SYSCALL_DEFINE6(ppc_pwrite64,
unsigned int, fd,
const char __user *, ubuf, compat_size_t, count,
u32, reg6, u32, pos1, u32, pos2)
@@ -63,28 +83,28 @@ COMPAT_SYSCALL_DEFINE6(ppc_pwrite64,
return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2));
}
-COMPAT_SYSCALL_DEFINE5(ppc_readahead,
+PPC32_SYSCALL_DEFINE5(ppc_readahead,
int, fd, u32, r4,
u32, offset1, u32, offset2, u32, count)
{
return ksys_readahead(fd, merge_64(offset1, offset2), count);
}
-COMPAT_SYSCALL_DEFINE4(ppc_truncate64,
+PPC32_SYSCALL_DEFINE4(ppc_truncate64,
const char __user *, path, u32, reg4,
unsigned long, len1, unsigned long, len2)
{
return ksys_truncate(path, merge_64(len1, len2));
}
-COMPAT_SYSCALL_DEFINE4(ppc_ftruncate64,
+PPC32_SYSCALL_DEFINE4(ppc_ftruncate64,
unsigned int, fd, u32, reg4,
unsigned long, len1, unsigned long, len2)
{
return ksys_ftruncate(fd, merge_64(len1, len2));
}
-COMPAT_SYSCALL_DEFINE6(ppc32_fadvise64,
+PPC32_SYSCALL_DEFINE6(ppc32_fadvise64,
int, fd, u32, unused, u32, offset1, u32, offset2,
size_t, len, int, advice)
{
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 2bca64f96164..e9e0df4f9a61 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -228,8 +228,10 @@
176 64 rt_sigtimedwait sys_rt_sigtimedwait
177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
-179 common pread64 sys_pread64 compat_sys_ppc_pread64
-180 common pwrite64 sys_pwrite64 compat_sys_ppc_pwrite64
+179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
+179 64 pread64 sys_pread64
+180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
+180 64 pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@@ -242,10 +244,11 @@
188 common putpmsg sys_ni_syscall
189 nospu vfork sys_vfork
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
-191 common readahead sys_readahead compat_sys_ppc_readahead
+191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
+191 64 readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
-193 32 truncate64 sys_truncate64 compat_sys_ppc_truncate64
-194 32 ftruncate64 sys_ftruncate64 compat_sys_ppc_ftruncate64
+193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
+194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
195 32 stat64 sys_stat64
196 32 lstat64 sys_lstat64
197 32 fstat64 sys_fstat64
@@ -288,7 +291,8 @@
230 common io_submit sys_io_submit compat_sys_io_submit
231 common io_cancel sys_io_cancel
232 nospu set_tid_address sys_set_tid_address
-233 common fadvise64 sys_fadvise64 compat_sys_ppc32_fadvise64
+233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
+233 64 fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
236 common epoll_create sys_epoll_create
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 598006301620..e2f11f9c3f2a 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -508,10 +508,10 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
unsigned long start,
unsigned long end, unsigned long page_shift,
- struct kvm *kvm, unsigned long gpa)
+ struct kvm *kvm, unsigned long gpa, struct page *fault_page)
{
unsigned long src_pfn, dst_pfn = 0;
- struct migrate_vma mig;
+ struct migrate_vma mig = { 0 };
struct page *dpage, *spage;
struct kvmppc_uvmem_page_pvt *pvt;
unsigned long pfn;
@@ -525,6 +525,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
mig.dst = &dst_pfn;
mig.pgmap_owner = &kvmppc_uvmem_pgmap;
mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+ mig.fault_page = fault_page;
/* The requested page is already paged-out, nothing to do */
if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
@@ -580,12 +581,14 @@ out_finalize:
static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long page_shift,
- struct kvm *kvm, unsigned long gpa)
+ struct kvm *kvm, unsigned long gpa,
+ struct page *fault_page)
{
int ret;
mutex_lock(&kvm->arch.uvmem_lock);
- ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
+ ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
+ fault_page);
mutex_unlock(&kvm->arch.uvmem_lock);
return ret;
@@ -634,7 +637,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
pvt->remove_gfn = true;
if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
- PAGE_SHIFT, kvm, pvt->gpa))
+ PAGE_SHIFT, kvm, pvt->gpa, NULL))
pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
pvt->gpa, addr);
} else {
@@ -715,7 +718,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
dpage = pfn_to_page(uvmem_pfn);
dpage->zone_device_data = pvt;
- lock_page(dpage);
+ zone_device_page_init(dpage);
return dpage;
out_clear:
spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -736,7 +739,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
bool pagein)
{
unsigned long src_pfn, dst_pfn = 0;
- struct migrate_vma mig;
+ struct migrate_vma mig = { 0 };
struct page *spage;
unsigned long pfn;
struct page *dpage;
@@ -994,7 +997,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
if (kvmppc_svm_page_out(vmf->vma, vmf->address,
vmf->address + PAGE_SIZE, PAGE_SHIFT,
- pvt->kvm, pvt->gpa))
+ pvt->kvm, pvt->gpa, vmf->page))
return VM_FAULT_SIGBUS;
else
return 0;
@@ -1065,7 +1068,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
if (!vma || vma->vm_start > start || vma->vm_end < end)
goto out;
- if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
+ if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL))
ret = H_SUCCESS;
out:
mmap_read_unlock(kvm->mm);
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 14e143b946a3..92310202bdd7 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -7,7 +7,7 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o mobility.o rng.o \
pci.o pci_dlpar.o eeh_pseries.o msi.o \
- papr_platform_attributes.o
+ papr_platform_attributes.o dtl.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o
obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
@@ -19,7 +19,6 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
obj-$(CONFIG_CMM) += cmm.o
-obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 1b1977bc78e7..3f1cdccebc9c 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -18,6 +18,7 @@
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>
+#ifdef CONFIG_DTL
struct dtl {
struct dtl_entry *buf;
int cpu;
@@ -58,78 +59,6 @@ static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);
static atomic_t dtl_count;
/*
- * Scan the dispatch trace log and count up the stolen time.
- * Should be called with interrupts disabled.
- */
-static notrace u64 scan_dispatch_log(u64 stop_tb)
-{
- u64 i = local_paca->dtl_ridx;
- struct dtl_entry *dtl = local_paca->dtl_curr;
- struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
- struct lppaca *vpa = local_paca->lppaca_ptr;
- u64 tb_delta;
- u64 stolen = 0;
- u64 dtb;
-
- if (!dtl)
- return 0;
-
- if (i == be64_to_cpu(vpa->dtl_idx))
- return 0;
- while (i < be64_to_cpu(vpa->dtl_idx)) {
- dtb = be64_to_cpu(dtl->timebase);
- tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
- be32_to_cpu(dtl->ready_to_enqueue_time);
- barrier();
- if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
- /* buffer has overflowed */
- i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
- dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
- continue;
- }
- if (dtb > stop_tb)
- break;
- if (dtl_consumer)
- dtl_consumer(dtl, i);
- stolen += tb_delta;
- ++i;
- ++dtl;
- if (dtl == dtl_end)
- dtl = local_paca->dispatch_log;
- }
- local_paca->dtl_ridx = i;
- local_paca->dtl_curr = dtl;
- return stolen;
-}
-
-/*
- * Accumulate stolen time by scanning the dispatch trace log.
- * Called on entry from user mode.
- */
-void notrace pseries_accumulate_stolen_time(void)
-{
- u64 sst, ust;
- struct cpu_accounting_data *acct = &local_paca->accounting;
-
- sst = scan_dispatch_log(acct->starttime_user);
- ust = scan_dispatch_log(acct->starttime);
- acct->stime -= sst;
- acct->utime -= ust;
- acct->steal_time += ust + sst;
-}
-
-u64 pseries_calculate_stolen_time(u64 stop_tb)
-{
- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
- return 0;
-
- if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
- return scan_dispatch_log(stop_tb);
-
- return 0;
-}
-
-/*
* The cpu accounting code controls the DTL ring buffer, and we get
* given entries as they are processed.
*/
@@ -436,3 +365,81 @@ static int dtl_init(void)
return 0;
}
machine_arch_initcall(pseries, dtl_init);
+#endif /* CONFIG_DTL */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+/*
+ * Scan the dispatch trace log and count up the stolen time.
+ * Should be called with interrupts disabled.
+ */
+static notrace u64 scan_dispatch_log(u64 stop_tb)
+{
+ u64 i = local_paca->dtl_ridx;
+ struct dtl_entry *dtl = local_paca->dtl_curr;
+ struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
+ struct lppaca *vpa = local_paca->lppaca_ptr;
+ u64 tb_delta;
+ u64 stolen = 0;
+ u64 dtb;
+
+ if (!dtl)
+ return 0;
+
+ if (i == be64_to_cpu(vpa->dtl_idx))
+ return 0;
+ while (i < be64_to_cpu(vpa->dtl_idx)) {
+ dtb = be64_to_cpu(dtl->timebase);
+ tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
+ be32_to_cpu(dtl->ready_to_enqueue_time);
+ barrier();
+ if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
+ /* buffer has overflowed */
+ i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
+ dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+ continue;
+ }
+ if (dtb > stop_tb)
+ break;
+#ifdef CONFIG_DTL
+ if (dtl_consumer)
+ dtl_consumer(dtl, i);
+#endif
+ stolen += tb_delta;
+ ++i;
+ ++dtl;
+ if (dtl == dtl_end)
+ dtl = local_paca->dispatch_log;
+ }
+ local_paca->dtl_ridx = i;
+ local_paca->dtl_curr = dtl;
+ return stolen;
+}
+
+/*
+ * Accumulate stolen time by scanning the dispatch trace log.
+ * Called on entry from user mode.
+ */
+void notrace pseries_accumulate_stolen_time(void)
+{
+ u64 sst, ust;
+ struct cpu_accounting_data *acct = &local_paca->accounting;
+
+ sst = scan_dispatch_log(acct->starttime_user);
+ ust = scan_dispatch_log(acct->starttime);
+ acct->stime -= sst;
+ acct->utime -= ust;
+ acct->steal_time += ust + sst;
+}
+
+u64 pseries_calculate_stolen_time(u64 stop_tb)
+{
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ return 0;
+
+ if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
+ return scan_dispatch_log(stop_tb);
+
+ return 0;
+}
+
+#endif
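
The relocation above splits dtl.c in two: the debugfs consumer stays under CONFIG_DTL, while the stolen-time accounting moves under CONFIG_VIRT_CPU_ACCOUNTING_NATIVE (with the dtl_consumer hook itself still guarded by CONFIG_DTL), so either side can now be built without the other. Both rely on the same lockless ring discipline: when the hypervisor's producer index gets more than N_DISPATCH_LOG entries ahead, the reader must jump forward to the oldest entry that is still valid. A minimal user-space sketch of just that overflow-skip logic, with invented names and sizes:

	#include <stdint.h>
	#include <stdio.h>

	#define N_ENTRIES 16	/* ring capacity; the kernel's is N_DISPATCH_LOG */

	/*
	 * Consume entries [rd, wr) from a ring the producer may overrun,
	 * mirroring the overflow handling in scan_dispatch_log() above.
	 */
	static uint64_t drain(const uint32_t *ring, uint64_t rd, uint64_t wr)
	{
		uint64_t sum = 0;

		while (rd < wr) {
			if (rd + N_ENTRIES < wr) {
				/* producer lapped us: skip to oldest valid entry */
				rd = wr - N_ENTRIES;
				continue;
			}
			sum += ring[rd % N_ENTRIES];
			rd++;
		}
		return sum;
	}

	int main(void)
	{
		uint32_t ring[N_ENTRIES];

		for (int i = 0; i < N_ENTRIES; i++)
			ring[i] = 1;

		/* reader at 0, producer already at 40: only the last 16 count */
		printf("consumed %llu units\n",
		       (unsigned long long)drain(ring, 0, 40));
		return 0;
	}
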
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 56976e5674ee..6b48a3ae9843 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -70,6 +70,7 @@ config RISCV
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
+ select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index e9bd21816ea9..1c8ec656e916 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -37,6 +37,7 @@ else
endif
ifeq ($(CONFIG_LD_IS_LLD),y)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0)
KBUILD_CFLAGS += -mno-relax
KBUILD_AFLAGS += -mno-relax
ifndef CONFIG_AS_IS_LLVM
@@ -44,6 +45,7 @@ ifndef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS += -Wa,-mno-relax
endif
endif
+endif
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
index 39aae7b04f1c..7427a20934f3 100644
--- a/arch/riscv/boot/dts/microchip/Makefile
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-icicle-kit.dtb
+dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-m100pfsevp.dtb
dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-polarberry.dtb
+dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += mpfs-sev-kit.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
index 0d28858b83f2..24b1cfb9a73e 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
@@ -2,20 +2,21 @@
/* Copyright (c) 2020-2021 Microchip Technology Inc */
/ {
- compatible = "microchip,mpfs-icicle-reference-rtlv2203", "microchip,mpfs";
+ compatible = "microchip,mpfs-icicle-reference-rtlv2210", "microchip,mpfs-icicle-kit",
+ "microchip,mpfs";
- core_pwm0: pwm@41000000 {
+ core_pwm0: pwm@40000000 {
compatible = "microchip,corepwm-rtl-v4";
- reg = <0x0 0x41000000 0x0 0xF0>;
+ reg = <0x0 0x40000000 0x0 0xF0>;
microchip,sync-update-mask = /bits/ 32 <0>;
#pwm-cells = <2>;
clocks = <&fabric_clk3>;
status = "disabled";
};
- i2c2: i2c@44000000 {
+ i2c2: i2c@40000200 {
compatible = "microchip,corei2c-rtl-v7";
- reg = <0x0 0x44000000 0x0 0x1000>;
+ reg = <0x0 0x40000200 0x0 0x100>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&fabric_clk3>;
@@ -28,7 +29,7 @@
fabric_clk3: fabric-clk3 {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <62500000>;
+ clock-frequency = <50000000>;
};
fabric_clk1: fabric-clk1 {
@@ -36,4 +37,34 @@
#clock-cells = <0>;
clock-frequency = <125000000>;
};
+
+ pcie: pcie@3000000000 {
+ compatible = "microchip,pcie-host-1.0";
+ #address-cells = <0x3>;
+ #interrupt-cells = <0x1>;
+ #size-cells = <0x2>;
+ device_type = "pci";
+ reg = <0x30 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
+ reg-names = "cfg", "apb";
+ bus-range = <0x0 0x7f>;
+ interrupt-parent = <&plic>;
+ interrupts = <119>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
+ interrupt-map-mask = <0 0 0 7>;
+ clocks = <&fabric_clk1>, <&fabric_clk3>;
+ clock-names = "fic1", "fic3";
+ ranges = <0x3000000 0x0 0x8000000 0x30 0x8000000 0x0 0x80000000>;
+ dma-ranges = <0x02000000 0x0 0x00000000 0x0 0x00000000 0x1 0x00000000>;
+ msi-parent = <&pcie>;
+ msi-controller;
+ status = "disabled";
+ pcie_intc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
index f3f87ed2007f..ec7b7c2a3ce2 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts
@@ -11,7 +11,8 @@
/ {
model = "Microchip PolarFire-SoC Icicle Kit";
- compatible = "microchip,mpfs-icicle-kit", "microchip,mpfs";
+ compatible = "microchip,mpfs-icicle-reference-rtlv2210", "microchip,mpfs-icicle-kit",
+ "microchip,mpfs";
aliases {
ethernet0 = &mac1;
@@ -32,15 +33,26 @@
ddrc_cache_lo: memory@80000000 {
device_type = "memory";
- reg = <0x0 0x80000000 0x0 0x2e000000>;
+ reg = <0x0 0x80000000 0x0 0x40000000>;
status = "okay";
};
ddrc_cache_hi: memory@1000000000 {
device_type = "memory";
- reg = <0x10 0x0 0x0 0x40000000>;
+ reg = <0x10 0x40000000 0x0 0x40000000>;
status = "okay";
};
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ hss_payload: region@BFC00000 {
+ reg = <0x0 0xBFC00000 0x0 0x400000>;
+ no-map;
+ };
+ };
};
&core_pwm0 {
diff --git a/arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi
new file mode 100644
index 000000000000..7b9ee13b6a3a
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2022 Microchip Technology Inc */
+
+/ {
+ fabric_clk3: fabric-clk3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <62500000>;
+ };
+
+ fabric_clk1: fabric-clk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ pcie: pcie@2000000000 {
+ compatible = "microchip,pcie-host-1.0";
+ #address-cells = <0x3>;
+ #interrupt-cells = <0x1>;
+ #size-cells = <0x2>;
+ device_type = "pci";
+ reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
+ reg-names = "cfg", "apb";
+ bus-range = <0x0 0x7f>;
+ interrupt-parent = <&plic>;
+ interrupts = <119>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
+ interrupt-map-mask = <0 0 0 7>;
+ clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>;
+ clock-names = "fic0", "fic1", "fic3";
+ ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
+ msi-parent = <&pcie>;
+ msi-controller;
+ status = "disabled";
+ pcie_intc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts b/arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts
new file mode 100644
index 000000000000..184cb36a175e
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-m100pfsevp.dts
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Original all-in-one devicetree:
+ * Copyright (C) 2021-2022 - Wolfgang Grandegger <wg@aries-embedded.de>
+ * Rewritten to use includes:
+ * Copyright (C) 2022 - Conor Dooley <conor.dooley@microchip.com>
+ */
+/dts-v1/;
+
+#include "mpfs.dtsi"
+#include "mpfs-m100pfs-fabric.dtsi"
+
+/* Clock frequency (in Hz) of the rtcclk */
+#define MTIMER_FREQ 1000000
+
+/ {
+ model = "Aries Embedded M100PFEVPS";
+ compatible = "aries,m100pfsevp", "microchip,mpfs";
+
+ aliases {
+ ethernet0 = &mac0;
+ ethernet1 = &mac1;
+ serial0 = &mmuart0;
+ serial1 = &mmuart1;
+ serial2 = &mmuart2;
+ serial3 = &mmuart3;
+ serial4 = &mmuart4;
+ gpio0 = &gpio0;
+ gpio1 = &gpio2;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ cpus {
+ timebase-frequency = <MTIMER_FREQ>;
+ };
+
+ ddrc_cache_lo: memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x40000000>;
+ };
+ ddrc_cache_hi: memory@1040000000 {
+ device_type = "memory";
+ reg = <0x10 0x40000000 0x0 0x40000000>;
+ };
+};
+
+&can0 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+};
+
+&gpio0 {
+ interrupts = <13>, <14>, <15>, <16>,
+ <17>, <18>, <19>, <20>,
+ <21>, <22>, <23>, <24>,
+ <25>, <26>;
+ ngpios = <14>;
+ status = "okay";
+
+ pmic-irq-hog {
+ gpio-hog;
+ gpios = <13 0>;
+ input;
+ };
+
+ /* Set to low for eMMC, high for SD-card */
+ mmc-sel-hog {
+ gpio-hog;
+ gpios = <12 0>;
+ output-high;
+ };
+};
+
+&gpio2 {
+ interrupts = <13>, <14>, <15>, <16>,
+ <17>, <18>, <19>, <20>,
+ <21>, <22>, <23>, <24>,
+ <25>, <26>, <27>, <28>,
+ <29>, <30>, <31>, <32>,
+ <33>, <34>, <35>, <36>,
+ <37>, <38>, <39>, <40>,
+ <41>, <42>, <43>, <44>;
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&mac1 {
+ status = "okay";
+ phy-mode = "gmii";
+ phy-handle = <&phy1>;
+ phy1: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
+
+&mbox {
+ status = "okay";
+};
+
+&mmc {
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ no-1-8-v;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ disable-wp;
+ status = "okay";
+};
+
+&mmuart1 {
+ status = "okay";
+};
+
+&mmuart2 {
+ status = "okay";
+};
+
+&mmuart3 {
+ status = "okay";
+};
+
+&mmuart4 {
+ status = "okay";
+};
+
+&pcie {
+ status = "okay";
+};
+
+&qspi {
+ status = "okay";
+};
+
+&refclk {
+ clock-frequency = <125000000>;
+};
+
+&rtc {
+ status = "okay";
+};
+
+&spi0 {
+ status = "okay";
+};
+
+&spi1 {
+ status = "okay";
+};
+
+&syscontroller {
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+ dr_mode = "host";
+};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
index 49380c428ec9..67303bc0e451 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
@@ -13,4 +13,33 @@
#clock-cells = <0>;
clock-frequency = <125000000>;
};
+
+ pcie: pcie@2000000000 {
+ compatible = "microchip,pcie-host-1.0";
+ #address-cells = <0x3>;
+ #interrupt-cells = <0x1>;
+ #size-cells = <0x2>;
+ device_type = "pci";
+ reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
+ reg-names = "cfg", "apb";
+ bus-range = <0x0 0x7f>;
+ interrupt-parent = <&plic>;
+ interrupts = <119>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
+ interrupt-map-mask = <0 0 0 7>;
+ clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>;
+ clock-names = "fic0", "fic1", "fic3";
+ ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
+ msi-parent = <&pcie>;
+ msi-controller;
+ status = "disabled";
+ pcie_intc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
new file mode 100644
index 000000000000..8545baf4d129
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-sev-kit-fabric.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2022 Microchip Technology Inc */
+
+/ {
+ fabric_clk3: fabric-clk3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ fabric_clk1: fabric-clk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ pcie: pcie@2000000000 {
+ compatible = "microchip,pcie-host-1.0";
+ #address-cells = <0x3>;
+ #interrupt-cells = <0x1>;
+ #size-cells = <0x2>;
+ device_type = "pci";
+ reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
+ reg-names = "cfg", "apb";
+ bus-range = <0x0 0x7f>;
+ interrupt-parent = <&plic>;
+ interrupts = <119>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
+ interrupt-map-mask = <0 0 0 7>;
+ clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>;
+ clock-names = "fic0", "fic1", "fic3";
+ ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
+ msi-parent = <&pcie>;
+ msi-controller;
+ status = "disabled";
+ pcie_intc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts
new file mode 100644
index 000000000000..013cb666c72d
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/mpfs-sev-kit.dts
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2022 Microchip Technology Inc */
+
+/dts-v1/;
+
+#include "mpfs.dtsi"
+#include "mpfs-sev-kit-fabric.dtsi"
+
+/* Clock frequency (in Hz) of the rtcclk */
+#define MTIMER_FREQ 1000000
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "Microchip PolarFire-SoC SEV Kit";
+ compatible = "microchip,mpfs-sev-kit", "microchip,mpfs";
+
+ aliases {
+ ethernet0 = &mac1;
+ serial0 = &mmuart0;
+ serial1 = &mmuart1;
+ serial2 = &mmuart2;
+ serial3 = &mmuart3;
+ serial4 = &mmuart4;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ cpus {
+ timebase-frequency = <MTIMER_FREQ>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ fabricbuf0ddrc: buffer@80000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x80000000 0x0 0x2000000>;
+ };
+
+ fabricbuf1ddrnc: buffer@c4000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0xc4000000 0x0 0x4000000>;
+ };
+
+ fabricbuf2ddrncwcb: buffer@d4000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0xd4000000 0x0 0x4000000>;
+ };
+ };
+
+ ddrc_cache: memory@1000000000 {
+ device_type = "memory";
+ reg = <0x10 0x0 0x0 0x76000000>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&gpio2 {
+ interrupts = <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>,
+ <53>, <53>, <53>, <53>;
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+ phy-mode = "sgmii";
+ phy-handle = <&phy0>;
+ phy1: ethernet-phy@9 {
+ reg = <9>;
+ };
+ phy0: ethernet-phy@8 {
+ reg = <8>;
+ };
+};
+
+&mac1 {
+ status = "okay";
+ phy-mode = "sgmii";
+ phy-handle = <&phy1>;
+};
+
+&mbox {
+ status = "okay";
+};
+
+&mmc {
+ status = "okay";
+ bus-width = <4>;
+ disable-wp;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+};
+
+&mmuart1 {
+ status = "okay";
+};
+
+&mmuart2 {
+ status = "okay";
+};
+
+&mmuart3 {
+ status = "okay";
+};
+
+&mmuart4 {
+ status = "okay";
+};
+
+&refclk {
+ clock-frequency = <125000000>;
+};
+
+&rtc {
+ status = "okay";
+};
+
+&syscontroller {
+ status = "okay";
+};
+
+&usb {
+ status = "okay";
+ dr_mode = "otg";
+};
diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
index 6d9d455fa160..8f463399a568 100644
--- a/arch/riscv/boot/dts/microchip/mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
@@ -330,7 +330,7 @@
};
qspi: spi@21000000 {
- compatible = "microchip,mpfs-qspi";
+ compatible = "microchip,mpfs-qspi", "microchip,coreqspi-rtl-v2";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x21000000 0x0 0x1000>;
@@ -464,35 +464,6 @@
status = "disabled";
};
- pcie: pcie@2000000000 {
- compatible = "microchip,pcie-host-1.0";
- #address-cells = <0x3>;
- #interrupt-cells = <0x1>;
- #size-cells = <0x2>;
- device_type = "pci";
- reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
- reg-names = "cfg", "apb";
- bus-range = <0x0 0x7f>;
- interrupt-parent = <&plic>;
- interrupts = <119>;
- interrupt-map = <0 0 0 1 &pcie_intc 0>,
- <0 0 0 2 &pcie_intc 1>,
- <0 0 0 3 &pcie_intc 2>,
- <0 0 0 4 &pcie_intc 3>;
- interrupt-map-mask = <0 0 0 7>;
- clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>;
- clock-names = "fic0", "fic1", "fic3";
- ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
- msi-parent = <&pcie>;
- msi-controller;
- status = "disabled";
- pcie_intc: interrupt-controller {
- #address-cells = <0>;
- #interrupt-cells = <1>;
- interrupt-controller;
- };
- };
-
mbox: mailbox@37020000 {
compatible = "microchip,mpfs-mailbox";
reg = <0x0 0x37020000 0x0 0x1000>, <0x0 0x2000318C 0x0 0x40>;
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index 96648c176f37..21546937db39 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -17,6 +17,9 @@
static bool errata_probe_pbmt(unsigned int stage,
unsigned long arch_id, unsigned long impid)
{
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PBMT))
+ return false;
+
if (arch_id != 0 || impid != 0)
return false;
@@ -30,7 +33,9 @@ static bool errata_probe_pbmt(unsigned int stage,
static bool errata_probe_cmo(unsigned int stage,
unsigned long arch_id, unsigned long impid)
{
-#ifdef CONFIG_ERRATA_THEAD_CMO
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_CMO))
+ return false;
+
if (arch_id != 0 || impid != 0)
return false;
@@ -40,9 +45,6 @@ static bool errata_probe_cmo(unsigned int stage,
riscv_cbom_block_size = L1_CACHE_BYTES;
riscv_noncoherent_supported();
return true;
-#else
- return false;
-#endif
}
static u32 thead_errata_probe(unsigned int stage,
@@ -51,10 +53,10 @@ static u32 thead_errata_probe(unsigned int stage,
u32 cpu_req_errata = 0;
if (errata_probe_pbmt(stage, archid, impid))
- cpu_req_errata |= (1U << ERRATA_THEAD_PBMT);
+ cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
if (errata_probe_cmo(stage, archid, impid))
- cpu_req_errata |= (1U << ERRATA_THEAD_CMO);
+ cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
return cpu_req_errata;
}
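
This hunk, like the cpufeature.c one further down, converts #ifdef blocks into IS_ENABLED() early returns and open-coded (1U << n) into BIT(n). With IS_ENABLED() the whole function is parsed and type-checked even when the option is off, and the constant-false branch is removed by dead-code elimination, so disabled configurations can no longer bit-rot silently. A hedged sketch of the idiom with invented config and helper names:

	#include <linux/bits.h>
	#include <linux/kconfig.h>
	#include <linux/types.h>

	#define MY_FEATURE_NR	0	/* illustrative bit index */

	/* hypothetical detection helper standing in for the real probe */
	static bool hw_has_my_feature(void) { return false; }

	static bool probe_my_feature(void)
	{
		/*
		 * Still compiled when CONFIG_MY_FEATURE is off; the early
		 * return lets the optimizer delete everything after it.
		 */
		if (!IS_ENABLED(CONFIG_MY_FEATURE))
			return false;

		return hw_has_my_feature();
	}

	static u32 build_req_mask(void)
	{
		u32 mask = 0;

		if (probe_my_feature())
			mask |= BIT(MY_FEATURE_NR);	/* BIT(n) == 1UL << (n) */
		return mask;
	}
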
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 273ece6b622f..8a5c246b0a21 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -55,6 +55,8 @@ static inline void riscv_init_cbom_blocksize(void) { }
#ifdef CONFIG_RISCV_DMA_NONCOHERENT
void riscv_noncoherent_supported(void);
+#else
+static inline void riscv_noncoherent_supported(void) {}
#endif
/*
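
The companion piece to the IS_ENABLED() conversion is this stub: an empty static inline for the !CONFIG_RISCV_DMA_NONCOHERENT case lets callers such as errata_probe_cmo() invoke riscv_noncoherent_supported() unconditionally instead of wrapping every call site in #ifdef. The general header idiom, sketched with an invented name:

	/* my_feature.h */
	#ifdef CONFIG_MY_FEATURE
	void my_feature_enable(void);			/* real definition elsewhere */
	#else
	static inline void my_feature_enable(void) { }	/* no-op, inlined away */
	#endif
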
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 14fc7342490b..e7acffdf21d2 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -99,6 +99,10 @@ do { \
get_cache_size(2, CACHE_TYPE_UNIFIED)); \
NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, \
get_cache_geometry(2, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L3_CACHESIZE, \
+ get_cache_size(3, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, \
+ get_cache_geometry(3, CACHE_TYPE_UNIFIED)); \
} while (0)
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 69605a474270..92080a227937 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -101,9 +101,9 @@ __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
-#define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
-#define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
-#define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
+#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
+#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
+#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
@@ -115,22 +115,22 @@ __io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
-#define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
-#define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
-#define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
+#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
+#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
+#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
#ifdef CONFIG_64BIT
__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
-#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
+#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)
__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
__io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
-#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
+#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
#endif
#include <asm-generic/io.h>
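
Before this change the string port-I/O macros reinterpreted the port number itself as an MMIO pointer; they now offset it from PCI_IOBASE, the virtual window through which RISC-V maps legacy I/O port space, matching what asm-generic already does for inb()/outb(). An illustrative (hypothetical) driver use, with an invented legacy port number:

	#include <linux/io.h>

	#define MY_DATA_PORT	0x1f0	/* invented legacy I/O port */

	static void read_one_sector(u16 *buf)
	{
		/*
		 * insw() now expands to __insw(PCI_IOBASE + 0x1f0, ...), a
		 * read through the mapped I/O-port window rather than from
		 * raw virtual address 0x1f0. 256 u16 reads = 512 bytes.
		 */
		insw(MY_DATA_PORT, buf, 256);
	}
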
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index cedcf8ea3c76..0099dc116168 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -16,7 +16,6 @@ typedef struct {
atomic_long_t id;
#endif
void *vdso;
- void *vdso_info;
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
index 32c73ba1d531..fb187a33ce58 100644
--- a/arch/riscv/include/uapi/asm/auxvec.h
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -30,8 +30,10 @@
#define AT_L1D_CACHEGEOMETRY 43
#define AT_L2_CACHESIZE 44
#define AT_L2_CACHEGEOMETRY 45
+#define AT_L3_CACHESIZE 46
+#define AT_L3_CACHEGEOMETRY 47
/* entries in ARCH_DLINFO */
-#define AT_VECTOR_SIZE_ARCH 7
+#define AT_VECTOR_SIZE_ARCH 9
#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
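
Together with the elf.h hunk above, this exposes L3 cache size and geometry to user space through the auxiliary vector; AT_VECTOR_SIZE_ARCH grows by exactly the two new entries. A small runnable probe, assuming glibc's <sys/auxv.h> and defining the new constants locally since older uapi headers will not have them:

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef AT_L3_CACHESIZE
	#define AT_L3_CACHESIZE		46
	#define AT_L3_CACHEGEOMETRY	47
	#endif

	int main(void)
	{
		/* getauxval() returns 0 if the kernel supplied no entry */
		unsigned long size = getauxval(AT_L3_CACHESIZE);
		unsigned long geo  = getauxval(AT_L3_CACHEGEOMETRY);

		printf("L3: %lu bytes, geometry 0x%lx\n", size, geo);
		return 0;
	}
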
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 4d0dece5996c..fa427bdcf773 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -3,10 +3,13 @@
* Copyright (C) 2012 Regents of the University of California
*/
+#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/of.h>
+#include <asm/csr.h>
#include <asm/hwcap.h>
+#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
@@ -68,6 +71,50 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
}
#ifdef CONFIG_PROC_FS
+
+struct riscv_cpuinfo {
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+};
+static DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+
+static int riscv_cpuinfo_starting(unsigned int cpu)
+{
+ struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
+
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+ ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
+ ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
+ ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
+#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
+ ci->mvendorid = csr_read(CSR_MVENDORID);
+ ci->marchid = csr_read(CSR_MARCHID);
+ ci->mimpid = csr_read(CSR_MIMPID);
+#else
+ ci->mvendorid = 0;
+ ci->marchid = 0;
+ ci->mimpid = 0;
+#endif
+
+ return 0;
+}
+
+static int __init riscv_cpuinfo_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/cpuinfo:starting",
+ riscv_cpuinfo_starting, NULL);
+ if (ret < 0) {
+ pr_err("cpuinfo: failed to register hotplug callbacks.\n");
+ return ret;
+ }
+
+ return 0;
+}
+device_initcall(riscv_cpuinfo_init);
+
#define __RISCV_ISA_EXT_DATA(UPROP, EXTID) \
{ \
.uprop = #UPROP, \
@@ -186,6 +233,7 @@ static int c_show(struct seq_file *m, void *v)
{
unsigned long cpu_id = (unsigned long)v - 1;
struct device_node *node = of_get_cpu_node(cpu_id, NULL);
+ struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
const char *compat, *isa;
seq_printf(m, "processor\t: %lu\n", cpu_id);
@@ -196,6 +244,9 @@ static int c_show(struct seq_file *m, void *v)
if (!of_property_read_string(node, "compatible", &compat)
&& strcmp(compat, "riscv"))
seq_printf(m, "uarch\t\t: %s\n", compat);
+ seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
+ seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
+ seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);
seq_puts(m, "\n");
of_node_put(node);
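
The new riscv_cpuinfo code caches the machine-mode ID registers once per CPU from a hotplug "starting" callback, so reading /proc/cpuinfo never has to make cross-CPU SBI calls. The shape of that pattern, reduced to a hedged sketch with invented names:

	#include <linux/cpuhotplug.h>
	#include <linux/init.h>
	#include <linux/percpu.h>

	struct my_cpuinfo {
		unsigned long id;	/* whatever is costly to re-read */
	};
	static DEFINE_PER_CPU(struct my_cpuinfo, my_cpuinfo);

	static unsigned long read_hw_id(void) { return 0; }	/* hypothetical */

	static int my_cpu_starting(unsigned int cpu)
	{
		/* runs on the CPU coming up, including late hotplug */
		this_cpu_ptr(&my_cpuinfo)->id = read_hw_id();
		return 0;
	}

	static int __init my_cpuinfo_init(void)
	{
		/*
		 * CPUHP_AP_ONLINE_DYN allocates a dynamic state and calls the
		 * callback on every CPU already online, then on future ones.
		 */
		int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					    "example/cpuinfo:starting",
					    my_cpu_starting, NULL);
		return ret < 0 ? ret : 0;
	}
	device_initcall(my_cpuinfo_init);
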
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 9774f1271f93..694267d1fe81 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -254,35 +254,28 @@ void __init riscv_fill_hwcap(void)
#ifdef CONFIG_RISCV_ALTERNATIVE
static bool __init_or_module cpufeature_probe_svpbmt(unsigned int stage)
{
-#ifdef CONFIG_RISCV_ISA_SVPBMT
- switch (stage) {
- case RISCV_ALTERNATIVES_EARLY_BOOT:
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_SVPBMT))
+ return false;
+
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return false;
- default:
- return riscv_isa_extension_available(NULL, SVPBMT);
- }
-#endif
- return false;
+ return riscv_isa_extension_available(NULL, SVPBMT);
}
static bool __init_or_module cpufeature_probe_zicbom(unsigned int stage)
{
-#ifdef CONFIG_RISCV_ISA_ZICBOM
- switch (stage) {
- case RISCV_ALTERNATIVES_EARLY_BOOT:
+ if (!IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM))
+ return false;
+
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+ return false;
+
+ if (!riscv_isa_extension_available(NULL, ZICBOM))
return false;
- default:
- if (riscv_isa_extension_available(NULL, ZICBOM)) {
- riscv_noncoherent_supported();
- return true;
- } else {
- return false;
- }
- }
-#endif
- return false;
+ riscv_noncoherent_supported();
+ return true;
}
/*
@@ -297,10 +290,10 @@ static u32 __init_or_module cpufeature_probe(unsigned int stage)
u32 cpu_req_feature = 0;
if (cpufeature_probe_svpbmt(stage))
- cpu_req_feature |= (1U << CPUFEATURE_SVPBMT);
+ cpu_req_feature |= BIT(CPUFEATURE_SVPBMT);
if (cpufeature_probe_zicbom(stage))
- cpu_req_feature |= (1U << CPUFEATURE_ZICBOM);
+ cpu_req_feature |= BIT(CPUFEATURE_ZICBOM);
return cpu_req_feature;
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 2dfc463b86bb..ad76bb59b059 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -252,10 +252,10 @@ static void __init parse_dtb(void)
pr_info("Machine model: %s\n", name);
dump_stack_set_arch_desc("%s (DT)", name);
}
- return;
+ } else {
+ pr_err("No DTB passed to the kernel\n");
}
- pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
pr_info("Forcing kernel command line to: %s\n", boot_command_line);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 571556bb9261..5d3f2fbeb33c 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -18,9 +18,6 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
- if (unlikely((prot & PROT_WRITE) && !(prot & PROT_READ)))
- return -EINVAL;
-
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 635e6ec26938..f3e96d60a2ff 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -33,6 +33,7 @@ void die(struct pt_regs *regs, const char *str)
{
static int die_counter;
int ret;
+ long cause;
oops_enter();
@@ -42,11 +43,13 @@ void die(struct pt_regs *regs, const char *str)
pr_emerg("%s [#%d]\n", str, ++die_counter);
print_modules();
- show_regs(regs);
+ if (regs)
+ show_regs(regs);
- ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV);
+ cause = regs ? regs->cause : -1;
+ ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);
- if (regs && kexec_should_crash(current))
+ if (kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 692e7ae3dcb8..123d05255fcf 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -60,6 +60,11 @@ struct __vdso_info {
struct vm_special_mapping *cm;
};
+static struct __vdso_info vdso_info;
+#ifdef CONFIG_COMPAT
+static struct __vdso_info compat_vdso_info;
+#endif
+
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
@@ -115,15 +120,18 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
- struct __vdso_info *vdso_info = mm->context.vdso_info;
mmap_read_lock(mm);
for_each_vma(vmi, vma) {
unsigned long size = vma->vm_end - vma->vm_start;
- if (vma_is_special_mapping(vma, vdso_info->dm))
+ if (vma_is_special_mapping(vma, vdso_info.dm))
zap_page_range(vma, vma->vm_start, size);
+#ifdef CONFIG_COMPAT
+ if (vma_is_special_mapping(vma, compat_vdso_info.dm))
+ zap_page_range(vma, vma->vm_start, size);
+#endif
}
mmap_read_unlock(mm);
@@ -265,7 +273,6 @@ static int __setup_additional_pages(struct mm_struct *mm,
vdso_base += VVAR_SIZE;
mm->context.vdso = (void *)vdso_base;
- mm->context.vdso_info = (void *)vdso_info;
ret =
_install_special_mapping(mm, vdso_base, vdso_text_len,
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index f2fbd1400b7c..d86f7cebd4a7 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -184,7 +184,8 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
}
break;
case EXC_LOAD_PAGE_FAULT:
- if (!(vma->vm_flags & VM_READ)) {
+ /* Write implies read */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) {
return true;
}
break;
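
This hunk and the sys_riscv.c one above implement the same policy from opposite ends: RISC-V page permissions make writable imply readable, so instead of rejecting PROT_WRITE-only mappings at mmap() time, the fault handler now accepts loads from any writable VMA. A runnable user-space check (before the patch, the mmap() itself failed with EINVAL on riscv):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");		/* pre-patch riscv: EINVAL */
			return 1;
		}
		strcpy(p, "hello");
		printf("read back: %s\n", p);	/* load from a write-only mapping */
		return 0;
	}
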
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index d5119e039d85..42af4b3aa02b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -224,13 +224,13 @@ unsigned long __get_wchan(struct task_struct *p)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & ~0xf;
}
static inline unsigned long brk_rnd(void)
{
- return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
+ return (get_random_u16() & BRK_RND_MASK) << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
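
This is one instance of a tree-wide conversion that also covers the sparc, um, x86 and crypto/testmgr.c hunks below: prandom_u32() % n and get_random_int() & mask become prandom_u32_max(n) and the sized get_random_u8/u16/u32() helpers. prandom_u32_max() maps a 32-bit sample into [0, n) with a multiply-and-high-shift, which avoids the hardware divide a modulo needs. A user-space model of the helper (the kernel's draws its sample from prandom_u32()):

	#include <stdint.h>

	/*
	 * Scale a uniform 32-bit sample into [0, range): take the top 32
	 * bits of the 64-bit product instead of computing sample % range.
	 */
	static uint32_t u32_max(uint32_t sample, uint32_t range)
	{
		return (uint32_t)(((uint64_t)sample * range) >> 32);
	}

So sp -= prandom_u32_max(PAGE_SIZE) above picks a stack offset in [0, PAGE_SIZE) without a division on that path.
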
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 535099f2736d..3105ca5bd470 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -227,7 +227,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
end -= len;
if (end > start) {
- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
addr = start + (offset << PAGE_SHIFT);
} else {
addr = start;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 5980ce348832..3327c47bc181 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -37,7 +37,7 @@ static inline int mmap_is_legacy(struct rlimit *rlim_stack)
unsigned long arch_mmap_rnd(void)
{
- return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
+ return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}
static unsigned long mmap_base_legacy(unsigned long rnd)
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index cc19e09b0fa1..ae9a86cb6f3d 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -354,7 +354,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned int len)
unsigned int offset;
/* This loses some more bits than a modulo, but is cheaper */
- offset = get_random_int() & (PTRS_PER_PTE - 1);
+ offset = prandom_u32_max(PTRS_PER_PTE);
return start + (offset << PAGE_SHIFT);
}
diff --git a/arch/um/drivers/chan.h b/arch/um/drivers/chan.h
index c37cc4f26f91..3fec3b8406e9 100644
--- a/arch/um/drivers/chan.h
+++ b/arch/um/drivers/chan.h
@@ -36,7 +36,6 @@ extern int console_write_chan(struct chan *chan, const char *buf,
int len);
extern int console_open_chan(struct line *line, struct console *co);
extern void deactivate_chan(struct chan *chan, int irq);
-extern void reactivate_chan(struct chan *chan, int irq);
extern void chan_enable_winch(struct chan *chan, struct tty_port *port);
extern int enable_chan(struct line *line);
extern void close_chan(struct line *line);
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 8ca67a692683..5026e7b9adfe 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -283,7 +283,7 @@ struct unplugged_pages {
};
static DEFINE_MUTEX(plug_mem_mutex);
-static unsigned long long unplugged_pages_count = 0;
+static unsigned long long unplugged_pages_count;
static LIST_HEAD(unplugged_pages);
static int unplug_index = UNPLUGGED_PER_PAGE;
@@ -846,13 +846,12 @@ static int notify_panic(struct notifier_block *self, unsigned long unused1,
mconsole_notify(notify_socket, MCONSOLE_PANIC, message,
strlen(message) + 1);
- return 0;
+ return NOTIFY_DONE;
}
static struct notifier_block panic_exit_notifier = {
- .notifier_call = notify_panic,
- .next = NULL,
- .priority = 1
+ .notifier_call = notify_panic,
+ .priority = INT_MAX, /* run as soon as possible */
};
static int add_notifier(void)
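
Besides returning NOTIFY_DONE instead of a bare 0, the notifier blocks here gain explicit priorities so UML's panic handlers run in a defined order: the mconsole notification first at INT_MAX, then the core handler (in um_arch.c below) at INT_MAX - 1, which dumps core and does not return. A hedged sketch of registering such a handler:

	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <linux/panic_notifier.h>

	static int my_panic_handler(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		/* last-gasp work only; the system is already dying */
		return NOTIFY_DONE;	/* preferred over a raw 0 */
	}

	static struct notifier_block my_panic_nb = {
		.notifier_call	= my_panic_handler,
		.priority	= 0,	/* higher values run earlier */
	};

	static int __init my_setup(void)
	{
		atomic_notifier_chain_register(&panic_notifier_list,
					       &my_panic_nb);
		return 0;
	}
	device_initcall(my_setup);
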
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index 0bf78ff89011..807cd3358740 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -122,7 +122,7 @@ static int __init mmapper_init(void)
return 0;
}
-static void mmapper_exit(void)
+static void __exit mmapper_exit(void)
{
misc_deregister(&mmapper_dev);
}
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 59331384c2d3..3d7836c46507 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -265,7 +265,7 @@ static void uml_net_poll_controller(struct net_device *dev)
static void uml_net_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}
static const struct ethtool_ops uml_net_ethtool_ops = {
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 8514966778d5..277cea3d30eb 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -106,7 +106,7 @@ static const struct tty_operations ssl_ops = {
/* Changed by ssl_init and referenced by ssl_exit, which are both serialized
* by being an initcall and exitcall, respectively.
*/
-static int ssl_init_done = 0;
+static int ssl_init_done;
static void ssl_console_write(struct console *c, const char *string,
unsigned len)
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 489d5a746ed3..1c239737d88e 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -88,7 +88,7 @@ static int con_remove(int n, char **error_out)
}
/* Set in an initcall, checked in an exitcall */
-static int con_init_done = 0;
+static int con_init_done;
static int con_install(struct tty_driver *driver, struct tty_struct *tty)
{
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index eb2d2f0f0bcc..f4c1e6e97ad5 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1555,7 +1555,7 @@ static void do_io(struct io_thread_req *req, struct io_desc *desc)
int kernel_fd = -1;
/* Only changed by the io thread. XXX: currently unused. */
-static int io_count = 0;
+static int io_count;
int io_thread(void *arg)
{
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 548265312743..ded7c47d2fbe 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1372,7 +1372,7 @@ static void vector_net_poll_controller(struct net_device *dev)
static void vector_net_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}
static int vector_net_load_bpf_flash(struct net_device *dev,
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index 027847023184..acb55b302b14 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -857,7 +857,7 @@ void *pci_root_bus_fwnode(struct pci_bus *bus)
return um_pci_fwnode;
}
-static int um_pci_init(void)
+static int __init um_pci_init(void)
{
int err, i;
@@ -940,7 +940,7 @@ free:
}
module_init(um_pci_init);
-static void um_pci_exit(void)
+static void __exit um_pci_exit(void)
{
unregister_virtio_driver(&um_pci_virtio_driver);
irq_domain_remove(um_pci_msi_domain);
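
Several UML drivers gain missing __init/__exit annotations in this series. Code marked __init is discarded once boot completes, and __exit code is dropped entirely when the driver is built in, so a missing annotation quietly wastes memory. The canonical shape:

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init my_driver_init(void)
	{
		/* placed in .init.text, freed after boot */
		return 0;
	}
	module_init(my_driver_init);

	static void __exit my_driver_exit(void)
	{
		/* placed in .exit.text; discarded for built-in objects */
	}
	module_exit(my_driver_exit);

	MODULE_LICENSE("GPL");
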
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index e719af8bdf56..588930a0ced1 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -374,45 +374,48 @@ static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
u8 extra_payload[512];
} msg;
int rc;
+ irqreturn_t irq_rc = IRQ_NONE;
- rc = vhost_user_recv_req(vu_dev, &msg.msg,
- sizeof(msg.msg.payload) +
- sizeof(msg.extra_payload));
-
- vu_dev->recv_rc = rc;
- if (rc)
- return IRQ_NONE;
-
- switch (msg.msg.header.request) {
- case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
- vu_dev->config_changed_irq = true;
- response = 0;
- break;
- case VHOST_USER_SLAVE_VRING_CALL:
- virtio_device_for_each_vq((&vu_dev->vdev), vq) {
- if (vq->index == msg.msg.payload.vring_state.index) {
- response = 0;
- vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
- break;
+ while (1) {
+ rc = vhost_user_recv_req(vu_dev, &msg.msg,
+ sizeof(msg.msg.payload) +
+ sizeof(msg.extra_payload));
+ if (rc)
+ break;
+
+ switch (msg.msg.header.request) {
+ case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
+ vu_dev->config_changed_irq = true;
+ response = 0;
+ break;
+ case VHOST_USER_SLAVE_VRING_CALL:
+ virtio_device_for_each_vq((&vu_dev->vdev), vq) {
+ if (vq->index == msg.msg.payload.vring_state.index) {
+ response = 0;
+ vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
+ break;
+ }
}
+ break;
+ case VHOST_USER_SLAVE_IOTLB_MSG:
+ /* not supported - VIRTIO_F_ACCESS_PLATFORM */
+ case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
+ /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
+ default:
+ vu_err(vu_dev, "unexpected slave request %d\n",
+ msg.msg.header.request);
}
- break;
- case VHOST_USER_SLAVE_IOTLB_MSG:
- /* not supported - VIRTIO_F_ACCESS_PLATFORM */
- case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
- /* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
- default:
- vu_err(vu_dev, "unexpected slave request %d\n",
- msg.msg.header.request);
- }
-
- if (ev && !vu_dev->suspended)
- time_travel_add_irq_event(ev);
- if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
- vhost_user_reply(vu_dev, &msg.msg, response);
+ if (ev && !vu_dev->suspended)
+ time_travel_add_irq_event(ev);
- return IRQ_HANDLED;
+ if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
+ vhost_user_reply(vu_dev, &msg.msg, response);
+ irq_rc = IRQ_HANDLED;
+ };
+ /* mask EAGAIN as we try non-blocking read until socket is empty */
+ vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc;
+ return irq_rc;
}
static irqreturn_t vu_req_interrupt(int irq, void *data)
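
The rewritten handler drains the vhost-user socket in a loop instead of processing one message per interrupt, and the trailing -EAGAIN from the non-blocking read is reclassified as "queue empty" rather than an error. The same pattern in plain user-space C, assuming a file descriptor opened with O_NONBLOCK:

	#include <errno.h>
	#include <stdbool.h>
	#include <unistd.h>

	/*
	 * Drain a non-blocking fd; true means at least one message was
	 * processed (the IRQ_HANDLED analogue in the handler above).
	 */
	static bool drain_socket(int fd, char *buf, size_t len)
	{
		bool handled = false;
		ssize_t rc;

		while ((rc = read(fd, buf, len)) > 0) {
			/* ... process one message ... */
			handled = true;
		}
		/* EAGAIN only means "empty"; anything else is a real error */
		if (rc < 0 && errno != EAGAIN && errno != EWOULDBLOCK)
			handled = false;
		return handled;
	}
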
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index e7c7b53a1435..91485119ae67 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -169,7 +169,7 @@ __uml_setup("iomem=", parse_iomem,
);
/*
- * This list is constructed in parse_iomem and addresses filled in in
+ * This list is constructed in parse_iomem and addresses filled in
* setup_iomem, both of which run during early boot. Afterwards, it's
* unchanged.
*/
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 80b90b1276a1..010bc422a09d 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -356,7 +356,7 @@ int singlestepping(void * t)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() % 8192;
+ sp -= prandom_u32_max(8192);
return sp & ~0xf;
}
#endif
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index d9e023c78f56..8adf8e89b255 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -96,7 +96,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+ return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
@@ -132,7 +132,7 @@ static int have_root __initdata;
static int have_console __initdata;
/* Set in uml_mem_setup and modified in linux_main */
-long long physmem_size = 32 * 1024 * 1024;
+long long physmem_size = 64 * 1024 * 1024;
EXPORT_SYMBOL(physmem_size);
static const char *usage_string =
@@ -247,13 +247,13 @@ static int panic_exit(struct notifier_block *self, unsigned long unused1,
bust_spinlocks(0);
uml_exitcode = 1;
os_dump_core();
- return 0;
+
+ return NOTIFY_DONE;
}
static struct notifier_block panic_exit_notifier = {
- .notifier_call = panic_exit,
- .next = NULL,
- .priority = 0
+ .notifier_call = panic_exit,
+ .priority = INT_MAX - 1, /* run as 2nd notifier, won't return */
};
void uml_finishsetup(void)
@@ -416,7 +416,7 @@ void __init setup_arch(char **cmdline_p)
read_initrd();
paging_init();
- strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
setup_hostinfo(host_info, sizeof host_info);
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index 8031a038eb58..72bc60ade347 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -9,7 +9,7 @@
#include <os.h>
/* Changed by set_umid_arg */
-static int umid_inited = 0;
+static int umid_inited;
static int __init set_umid_arg(char *name, int *add)
{
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 6292b960037b..311eae30e089 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -327,7 +327,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
end -= len;
if (end > start) {
- offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
addr = start + (offset << PAGE_SHIFT);
} else {
addr = start;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 48276c0e479d..860b60273df3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -503,7 +503,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
/* A random value per boot for bit slice [12:upper_bit) */
- va_align.bits = get_random_int() & va_align.mask;
+ va_align.bits = get_random_u32() & va_align.mask;
}
if (cpu_has(c, X86_FEATURE_MWAITX))
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index b1abf663417c..c032edcd3d95 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -53,7 +53,7 @@ static unsigned long int get_module_load_offset(void)
*/
if (module_load_offset == 0)
module_load_offset =
- (get_random_int() % 1024 + 1) * PAGE_SIZE;
+ (prandom_u32_max(1024) + 1) * PAGE_SIZE;
mutex_unlock(&module_kaslr_mutex);
}
return module_load_offset;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 58a6ea472db9..c21b7347a26d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -965,7 +965,7 @@ early_param("idle", idle_setup);
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() % 8192;
+ sp -= prandom_u32_max(8192);
return sp & ~0xf;
}
diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c
index 0612a73638a8..423b21e80929 100644
--- a/arch/x86/mm/pat/cpa-test.c
+++ b/arch/x86/mm/pat/cpa-test.c
@@ -136,10 +136,10 @@ static int pageattr_test(void)
failed += print_split(&sa);
for (i = 0; i < NTEST; i++) {
- unsigned long pfn = prandom_u32() % max_pfn_mapped;
+ unsigned long pfn = prandom_u32_max(max_pfn_mapped);
addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
- len[i] = prandom_u32() % NPAGES;
+ len[i] = prandom_u32_max(NPAGES);
len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
if (len[i] == 0)
diff --git a/block/bio.c b/block/bio.c
index ec350e040b64..633a902468ec 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -567,7 +567,7 @@ EXPORT_SYMBOL(bio_alloc_bioset);
* be reused by calling bio_uninit() before calling bio_init() again.
*
* Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this
- * function are not backed by a mempool can can fail. Do not use this function
+ * function are not backed by a mempool and can fail. Do not use this function
* for allocations in the file system I/O path.
*
* Returns: Pointer to new bio on success, NULL on failure.
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 621abd1b0e4d..ad9844c5b40c 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -539,7 +539,7 @@ static int blk_crypto_fallback_init(void)
if (blk_crypto_fallback_inited)
return 0;
- prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+ get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
err = bioset_init(&crypto_bio_split, 64, 0, 0);
if (err)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 246467926253..c293e08b301f 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -841,12 +841,11 @@ int wbt_init(struct request_queue *q)
rwb->last_comp = rwb->last_issue = jiffies;
rwb->win_nsec = RWB_WINDOW_NSEC;
rwb->enable_state = WBT_STATE_ON_DEFAULT;
- rwb->wc = 1;
+ rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
wbt_queue_depth_changed(&rwb->rqos);
- wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
/*
* Assign rwb and add the stats callback.
diff --git a/block/genhd.c b/block/genhd.c
index 514395361d7c..17b33c62423d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -507,6 +507,13 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
*/
dev_set_uevent_suppress(ddev, 0);
disk_uevent(disk, KOBJ_ADD);
+ } else {
+ /*
+ * Even if the block_device for a hidden gendisk is not
+ * registered, it needs to have a valid bd_dev so that the
+ * freeing of the dynamic major works.
+ */
+ disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor);
}
disk_update_readahead(disk);
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index 9719c7520661..d3fbee1e03e5 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -37,7 +37,7 @@ static void makedata(int disks)
int i;
for (i = 0; i < disks; i++) {
- prandom_bytes(page_address(data[i]), PAGE_SIZE);
+ get_random_bytes(page_address(data[i]), PAGE_SIZE);
dataptrs[i] = data[i];
dataoffs[i] = 0;
}
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e4bb03b8b924..bcd059caa1c8 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -855,9 +855,9 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
/* Generate a random length in range [0, max_len], but prefer smaller values */
static unsigned int generate_random_length(unsigned int max_len)
{
- unsigned int len = prandom_u32() % (max_len + 1);
+ unsigned int len = prandom_u32_max(max_len + 1);
- switch (prandom_u32() % 4) {
+ switch (prandom_u32_max(4)) {
case 0:
return len % 64;
case 1:
@@ -874,14 +874,14 @@ static void flip_random_bit(u8 *buf, size_t size)
{
size_t bitpos;
- bitpos = prandom_u32() % (size * 8);
+ bitpos = prandom_u32_max(size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8);
}
/* Flip a random byte in the given nonempty data buffer */
static void flip_random_byte(u8 *buf, size_t size)
{
- buf[prandom_u32() % size] ^= 0xff;
+ buf[prandom_u32_max(size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
@@ -891,15 +891,15 @@ static void mutate_buffer(u8 *buf, size_t size)
size_t i;
/* Sometimes flip some bits */
- if (prandom_u32() % 4 == 0) {
- num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8);
+ if (prandom_u32_max(4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
for (i = 0; i < num_flips; i++)
flip_random_bit(buf, size);
}
/* Sometimes flip some bytes */
- if (prandom_u32() % 4 == 0) {
- num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size);
+ if (prandom_u32_max(4) == 0) {
+ num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
for (i = 0; i < num_flips; i++)
flip_random_byte(buf, size);
}
@@ -915,11 +915,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
if (count == 0)
return;
- switch (prandom_u32() % 8) { /* Choose a generation strategy */
+ switch (prandom_u32_max(8)) { /* Choose a generation strategy */
case 0:
case 1:
/* All the same byte, plus optional mutations */
- switch (prandom_u32() % 4) {
+ switch (prandom_u32_max(4)) {
case 0:
b = 0x00;
break;
@@ -927,7 +927,7 @@ static void generate_random_bytes(u8 *buf, size_t count)
b = 0xff;
break;
default:
- b = (u8)prandom_u32();
+ b = get_random_u8();
break;
}
memset(buf, b, count);
@@ -935,8 +935,8 @@ static void generate_random_bytes(u8 *buf, size_t count)
break;
case 2:
/* Ascending or descending bytes, plus optional mutations */
- increment = (u8)prandom_u32();
- b = (u8)prandom_u32();
+ increment = get_random_u8();
+ b = get_random_u8();
for (i = 0; i < count; i++, b += increment)
buf[i] = b;
mutate_buffer(buf, count);
@@ -944,7 +944,7 @@ static void generate_random_bytes(u8 *buf, size_t count)
default:
/* Fully random bytes */
for (i = 0; i < count; i++)
- buf[i] = (u8)prandom_u32();
+ buf[i] = get_random_u8();
}
}
@@ -959,24 +959,24 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
unsigned int this_len;
const char *flushtype_str;
- if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
+ if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
this_len = remaining;
else
- this_len = 1 + (prandom_u32() % remaining);
+ this_len = 1 + prandom_u32_max(remaining);
div->proportion_of_total = this_len;
- if (prandom_u32() % 4 == 0)
- div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128);
- else if (prandom_u32() % 2 == 0)
- div->offset = prandom_u32() % 32;
+ if (prandom_u32_max(4) == 0)
+ div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
+ else if (prandom_u32_max(2) == 0)
+ div->offset = prandom_u32_max(32);
else
- div->offset = prandom_u32() % PAGE_SIZE;
- if (prandom_u32() % 8 == 0)
+ div->offset = prandom_u32_max(PAGE_SIZE);
+ if (prandom_u32_max(8) == 0)
div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) {
- switch (prandom_u32() % 4) {
+ switch (prandom_u32_max(4)) {
case 0:
div->flush_type = FLUSH_TYPE_REIMPORT;
break;
@@ -988,7 +988,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- prandom_u32() % 2 == 0)
+ prandom_u32_max(2) == 0)
div->nosimd = true;
switch (div->flush_type) {
@@ -1035,7 +1035,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "random:");
- switch (prandom_u32() % 4) {
+ switch (prandom_u32_max(4)) {
case 0:
case 1:
cfg->inplace_mode = OUT_OF_PLACE;
@@ -1050,12 +1050,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
- if (prandom_u32() % 2 == 0) {
+ if (prandom_u32_max(2) == 0) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep");
}
- switch (prandom_u32() % 4) {
+ switch (prandom_u32_max(4)) {
case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final");
@@ -1071,7 +1071,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
}
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- prandom_u32() % 2 == 0) {
+ prandom_u32_max(2) == 0) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
}
@@ -1084,7 +1084,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
cfg->req_flags);
p += scnprintf(p, end - p, "]");
- if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32() % 2 == 0) {
+ if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
@@ -1093,13 +1093,13 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "]");
}
- if (prandom_u32() % 2 == 0) {
- cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
+ if (prandom_u32_max(2) == 0) {
+ cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
- if (prandom_u32() % 2 == 0) {
- cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
+ if (prandom_u32_max(2) == 0) {
+ cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
}
@@ -1652,8 +1652,8 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
vec->ksize = 0;
if (maxkeysize) {
vec->ksize = maxkeysize;
- if (prandom_u32() % 4 == 0)
- vec->ksize = 1 + (prandom_u32() % maxkeysize);
+ if (prandom_u32_max(4) == 0)
+ vec->ksize = 1 + prandom_u32_max(maxkeysize);
generate_random_bytes((u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
@@ -2218,13 +2218,13 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
- if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) {
+ if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
/* Mutate the AAD */
flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
- if (prandom_u32() % 2 == 0)
+ if (prandom_u32_max(2) == 0)
return;
}
- if (prandom_u32() % 2 == 0) {
+ if (prandom_u32_max(2) == 0) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */
flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
} else {
@@ -2249,7 +2249,7 @@ static void generate_aead_message(struct aead_request *req,
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
- (prefer_inauthentic || prandom_u32() % 4 == 0);
+ (prefer_inauthentic || prandom_u32_max(4) == 0);
/* Generate the AAD. */
generate_random_bytes((u8 *)vec->assoc, vec->alen);
@@ -2257,7 +2257,7 @@ static void generate_aead_message(struct aead_request *req,
/* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
- if (inauthentic && prandom_u32() % 2 == 0) {
+ if (inauthentic && prandom_u32_max(2) == 0) {
/* Generate a random ciphertext. */
generate_random_bytes((u8 *)vec->ctext, vec->clen);
} else {
@@ -2321,8 +2321,8 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (prandom_u32() % 4 == 0)
- vec->klen = prandom_u32() % (maxkeysize + 1);
+ if (prandom_u32_max(4) == 0)
+ vec->klen = prandom_u32_max(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
@@ -2331,8 +2331,8 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize;
- if (prandom_u32() % 4 == 0)
- authsize = prandom_u32() % (maxauthsize + 1);
+ if (prandom_u32_max(4) == 0)
+ authsize = prandom_u32_max(maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
@@ -2342,7 +2342,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* AAD, plaintext, and ciphertext lengths */
total_len = generate_random_length(maxdatasize);
- if (prandom_u32() % 4 == 0)
+ if (prandom_u32_max(4) == 0)
vec->alen = 0;
else
vec->alen = generate_random_length(total_len);
@@ -2958,8 +2958,8 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (prandom_u32() % 4 == 0)
- vec->klen = prandom_u32() % (maxkeysize + 1);
+ if (prandom_u32_max(4) == 0)
+ vec->klen = prandom_u32_max(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
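The testmgr hunks above all follow one pattern: `prandom_u32() % n` becomes
`prandom_u32_max(n)`, which bounds the result with a widening multiply and a
shift instead of a division. A minimal userspace sketch of that mapping,
assuming the in-tree multiply-and-shift definition of prandom_u32_max()
(rand() stands in for prandom_u32() here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace model of prandom_u32_max(): maps a full-range 32-bit
 * random value into [0, ep_ro) without a division. */
static uint32_t u32_max_model(uint32_t r, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)r * ep_ro) >> 32);
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		uint32_t r = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
		printf("%" PRIu32 " -> [0,8): %" PRIu32 "\n",
		       r, u32_max_model(r, 8));
	}
	return 0;
}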
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index da7ee8bec165..7add8e79912b 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -257,7 +257,7 @@ enum {
PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
/* em constants */
- EM_MAX_SLOTS = 8,
+ EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
EM_MAX_RETRY = 5,
/* em_ctl bits */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f61795c546cf..6f216eb25610 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -448,7 +448,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- priv->version = (enum brcm_ahci_version)of_id->data;
+ priv->version = (unsigned long)of_id->data;
priv->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index b734e069034d..a950767f7948 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1067,7 +1067,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
imxpriv->ahci_pdev = pdev;
imxpriv->no_device = false;
imxpriv->first_time = true;
- imxpriv->type = (enum ahci_imx_type)of_id->data;
+ imxpriv->type = (unsigned long)of_id->data;
imxpriv->sata_clk = devm_clk_get(dev, "sata");
if (IS_ERR(imxpriv->sata_clk)) {
@@ -1235,4 +1235,4 @@ module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 6cd61842ad48..9cf9bf36a874 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -280,7 +280,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
return -ENOMEM;
if (of_id)
- qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+ qoriq_priv->type = (unsigned long)of_id->data;
else
qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data;
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 5a2cac60a29a..8607b68eee53 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -236,7 +236,7 @@ static struct platform_driver st_ahci_driver = {
.driver = {
.name = DRV_NAME,
.pm = &st_ahci_pm_ops,
- .of_match_table = of_match_ptr(st_ahci_match),
+ .of_match_table = st_ahci_match,
},
.probe = st_ahci_probe,
.remove = ata_platform_remove_one,
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 7bb5db17f864..1e08704d5117 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -785,7 +785,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
of_devid = of_match_device(xgene_ahci_of_match, dev);
if (of_devid) {
if (of_devid->data)
- version = (enum xgene_ahci_version) of_devid->data;
+ version = (unsigned long) of_devid->data;
}
#ifdef CONFIG_ACPI
else {
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 590ebea99601..0195eb29f6c2 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -875,7 +875,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->type = (enum sata_rcar_type)of_device_get_match_data(dev);
+ priv->type = (unsigned long)of_device_get_match_data(dev);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
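The ahci_brcm, ahci_imx, ahci_qoriq, ahci_xgene, and sata_rcar hunks all make
the same change: OF match data is a `const void *`, and casting it straight to
an enum trips clang's -Wvoid-pointer-to-enum-cast, so the cast now goes
through `unsigned long`. A hedged sketch of the idiom; the names here
(enum foo_version, foo_probe, the compatible string) are illustrative, not
from the tree:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum foo_version { FOO_V1, FOO_V2 };

static const struct of_device_id foo_of_match[] = {
	/* The enum value travels through the pointer-sized .data field. */
	{ .compatible = "vendor,foo-v2", .data = (void *)(unsigned long)FOO_V2 },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	enum foo_version ver;

	/* void * -> unsigned long -> enum: no pointer-to-enum warning */
	ver = (unsigned long)of_device_get_match_data(&pdev->dev);

	return ver == FOO_V2 ? 0 : -ENODEV;
}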
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c897c4572036..ee69d50ba4fd 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -781,7 +781,7 @@ static struct socket *drbd_wait_for_connect(struct drbd_connection *connection,
timeo = connect_int * HZ;
/* 28.5% random jitter */
- timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
+ timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7;
err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
if (err <= 0)
@@ -1004,7 +1004,7 @@ retry:
drbd_warn(connection, "Error receiving initial packet\n");
sock_release(s);
randomize:
- if (prandom_u32() & 1)
+ if (prandom_u32_max(2))
goto retry;
}
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7c74d8cba44f..966aab902d19 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,9 +52,6 @@ static unsigned int num_devices = 1;
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
-#ifdef CONFIG_ZRAM_WRITEBACK
-static const struct block_device_operations zram_wb_devops;
-#endif
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -546,17 +543,6 @@ static ssize_t backing_dev_store(struct device *dev,
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
- /*
- * With writeback feature, zram does asynchronous IO so it's no longer
- * synchronous device so let's remove synchronous io flag. Othewise,
- * upper layer(e.g., swap) could wait IO completion rather than
- * (submit and return), which will cause system sluggish.
- * Furthermore, when the IO function returns(e.g., swap_readpage),
- * upper layer expects IO was done so it could deallocate the page
- * freely but in fact, IO is going on so finally could cause
- * use-after-free when the IO is really done.
- */
- zram->disk->fops = &zram_wb_devops;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
@@ -1270,6 +1256,9 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
struct bio_vec bvec;
zram_slot_unlock(zram, index);
+ /* A null bio means rw_page was used, we must fall back to bio */
+ if (!bio)
+ return -EOPNOTSUPP;
bvec.bv_page = page;
bvec.bv_len = PAGE_SIZE;
@@ -1856,15 +1845,6 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
-#ifdef CONFIG_ZRAM_WRITEBACK
-static const struct block_device_operations zram_wb_devops = {
- .open = zram_open,
- .submit_bio = zram_submit_bio,
- .swap_slot_free_notify = zram_slot_free_notify,
- .owner = THIS_MODULE
-};
-#endif
-
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index e7dd457e9b22..e98fcac578d6 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
- cpu_relax();
+ hwrng_msleep(rng, 1000);
}
num_words = rng_readl(priv, RNG_STATUS) >> 24;
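The bcm2835-rng hunk swaps a cpu_relax() busy-wait for hwrng_msleep(), which
sleeps for the requested time but can return early (for instance when the
device is going away), so the driver no longer burns a CPU while the hardware
FIFO refills. A sketch of the resulting polling shape; foo_data_ready() and
foo_read_word() are hypothetical MMIO accessors standing in for the
RNG_STATUS reads above:

#include <linux/hw_random.h>
#include <linux/types.h>

/* Hypothetical accessors, not from the tree. */
static bool foo_data_ready(struct hwrng *rng);
static u32 foo_read_word(struct hwrng *rng);

static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	while (!foo_data_ready(rng)) {
		if (!wait)
			return 0;
		/* Sleep instead of spinning while the FIFO refills. */
		hwrng_msleep(rng, 1000);
	}

	*(u32 *)buf = foo_read_word(rng);
	return sizeof(u32);
}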
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 01acf235f263..2fe28eeb2f38 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
* Returns whether or not the input pool has been seeded and thus guaranteed
* to supply cryptographically secure random numbers. This applies to: the
* /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
- * u16,u32,u64,int,long} family of functions.
+ * u16,u32,u64,long} family of functions.
*
* Returns: true if the input pool has been seeded.
* false if the input pool has not been seeded.
@@ -161,15 +161,14 @@ EXPORT_SYMBOL(wait_for_random_bytes);
* u16 get_random_u16()
* u32 get_random_u32()
* u64 get_random_u64()
- * unsigned int get_random_int()
* unsigned long get_random_long()
*
* These interfaces will return the requested number of random bytes
* into the given buffer or as a return value. This is equivalent to
- * a read from /dev/urandom. The u8, u16, u32, u64, int, and long
- * family of functions may be higher performance for one-off random
- * integers, because they do a bit of buffering and do not invoke
- * reseeding until the buffer is emptied.
+ * a read from /dev/urandom. The u8, u16, u32, u64, and long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
*
*********************************************************************/
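With get_random_int() gone from the documented family, callers pick the helper
matching the width they actually need. A minimal sketch of the surviving
helpers; the function names are the in-tree ones, while the surrounding
example function is illustrative:

#include <linux/printk.h>
#include <linux/random.h>

static void example_sized_randomness(void)
{
	u8  a = get_random_u8();	/* one random byte */
	u16 b = get_random_u16();
	u32 c = get_random_u32();
	u64 d = get_random_u64();
	unsigned long e = get_random_long();

	pr_info("u8=%u u16=%u u32=%u u64=%llu long=%lu\n", a, b, c, d, e);
}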
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index d429ba52a719..943ea67bf135 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -136,7 +136,6 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
{
struct clk_generated *gck = to_clk_generated(hw);
struct clk_hw *parent = NULL;
- struct clk_rate_request req_parent = *req;
long best_rate = -EINVAL;
unsigned long min_rate, parent_rate;
int best_diff = -1;
@@ -192,7 +191,9 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
goto end;
for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
- req_parent.rate = req->rate * div;
+ struct clk_rate_request req_parent;
+
+ clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate * div);
if (__clk_determine_rate(parent, &req_parent))
continue;
clk_generated_best_diff(req, parent, req_parent.rate, div,
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 164e2959c7cf..b7cd1924de52 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -581,7 +581,6 @@ static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_master *master = to_clk_master(hw);
- struct clk_rate_request req_parent = *req;
struct clk_hw *parent;
long best_rate = LONG_MIN, best_diff = LONG_MIN;
unsigned long parent_rate;
@@ -618,11 +617,15 @@ static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
goto end;
for (div = 0; div < MASTER_PRES_MAX + 1; div++) {
+ struct clk_rate_request req_parent;
+ unsigned long req_rate;
+
if (div == MASTER_PRES_MAX)
- req_parent.rate = req->rate * 3;
+ req_rate = req->rate * 3;
else
- req_parent.rate = req->rate << div;
+ req_rate = req->rate << div;
+ clk_hw_forward_rate_request(hw, req, parent, &req_parent, req_rate);
if (__clk_determine_rate(parent, &req_parent))
continue;
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index e14fa5ac734c..5104d4025484 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -269,7 +269,6 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
- struct clk_rate_request req_parent = *req;
unsigned long parent_rate = clk_hw_get_rate(parent);
unsigned long tmp_rate;
long best_rate = LONG_MIN;
@@ -302,8 +301,9 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
goto end;
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- req_parent.rate = req->rate << shift;
+ struct clk_rate_request req_parent;
+ clk_hw_forward_rate_request(hw, req, parent, &req_parent, req->rate << shift);
if (__clk_determine_rate(parent, &req_parent))
continue;
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index b9c5f904f535..edfa94641bbf 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -85,10 +85,11 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
req->best_parent_hw = NULL;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
- struct clk_rate_request tmp_req = *req;
+ struct clk_rate_request tmp_req;
parent = clk_hw_get_parent(mux_hw);
+ clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
@@ -104,12 +105,13 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
}
for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
- struct clk_rate_request tmp_req = *req;
+ struct clk_rate_request tmp_req;
parent = clk_hw_get_parent_by_index(mux_hw, i);
if (!parent)
continue;
+ clk_hw_forward_rate_request(hw, req, parent, &tmp_req, req->rate);
ret = clk_composite_determine_rate_for_parent(rate_hw,
&tmp_req,
parent,
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index f6b2bf558486..a2c2b5203b0a 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -386,13 +386,13 @@ long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
const struct clk_div_table *table,
u8 width, unsigned long flags)
{
- struct clk_rate_request req = {
- .rate = rate,
- .best_parent_rate = *prate,
- .best_parent_hw = parent,
- };
+ struct clk_rate_request req;
int ret;
+ clk_hw_init_rate_request(hw, &req, rate);
+ req.best_parent_rate = *prate;
+ req.best_parent_hw = parent;
+
ret = divider_determine_rate(hw, &req, table, width, flags);
if (ret)
return ret;
@@ -408,13 +408,13 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val)
{
- struct clk_rate_request req = {
- .rate = rate,
- .best_parent_rate = *prate,
- .best_parent_hw = parent,
- };
+ struct clk_rate_request req;
int ret;
+ clk_hw_init_rate_request(hw, &req, rate);
+ req.best_parent_rate = *prate;
+ req.best_parent_hw = parent;
+
ret = divider_ro_determine_rate(hw, &req, table, width, flags, val);
if (ret)
return ret;
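The provider-side hunks above all converge on the same shape: a determine_rate
callback no longer copies *req into a local request, but asks the core to
build one per parent via clk_hw_forward_rate_request(), so min_rate and
max_rate are always initialized and combined with the original request's
boundaries. A hedged sketch of that shape, assuming the API added in the clk.c
diff below; foo_determine_rate() and the divider bound are illustrative:

#include <linux/clk-provider.h>
#include <linux/errno.h>

static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	unsigned int div;

	if (!parent)
		return -EINVAL;

	for (div = 1; div <= 8; div++) {
		struct clk_rate_request req_parent;

		/* Fresh, properly-bounded request for this iteration. */
		clk_hw_forward_rate_request(hw, req, parent, &req_parent,
					    req->rate * div);
		if (__clk_determine_rate(parent, &req_parent))
			continue;

		/* Take the first divider the parent can satisfy. */
		req->best_parent_hw = parent;
		req->best_parent_rate = req_parent.rate;
		req->rate = req_parent.rate / div;
		return 0;
	}

	return -EINVAL;
}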
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index dd810bcd2700..c3c3f8c07258 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -536,6 +536,53 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
return now <= rate && now > best;
}
+static void clk_core_init_rate_req(struct clk_core * const core,
+ struct clk_rate_request *req,
+ unsigned long rate);
+
+static int clk_core_round_rate_nolock(struct clk_core *core,
+ struct clk_rate_request *req);
+
+static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
+{
+ struct clk_core *tmp;
+ unsigned int i;
+
+ /* Optimize for the case where the parent is already the current parent. */
+ if (core->parent == parent)
+ return true;
+
+ for (i = 0; i < core->num_parents; i++) {
+ tmp = clk_core_get_parent_by_index(core, i);
+ if (!tmp)
+ continue;
+
+ if (tmp == parent)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+clk_core_forward_rate_req(struct clk_core *core,
+ const struct clk_rate_request *old_req,
+ struct clk_core *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!clk_core_has_parent(core, parent)))
+ return;
+
+ clk_core_init_rate_req(parent, req, parent_rate);
+
+ if (req->min_rate < old_req->min_rate)
+ req->min_rate = old_req->min_rate;
+
+ if (req->max_rate > old_req->max_rate)
+ req->max_rate = old_req->max_rate;
+}
+
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags)
@@ -543,14 +590,20 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_core *core = hw->core, *parent, *best_parent = NULL;
int i, num_parents, ret;
unsigned long best = 0;
- struct clk_rate_request parent_req = *req;
/* if NO_REPARENT flag set, pass through to current parent */
if (core->flags & CLK_SET_RATE_NO_REPARENT) {
parent = core->parent;
if (core->flags & CLK_SET_RATE_PARENT) {
- ret = __clk_determine_rate(parent ? parent->hw : NULL,
- &parent_req);
+ struct clk_rate_request parent_req;
+
+ if (!parent) {
+ req->rate = 0;
+ return 0;
+ }
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
return ret;
@@ -567,23 +620,29 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
/* find the parent that can provide the fastest rate <= rate */
num_parents = core->num_parents;
for (i = 0; i < num_parents; i++) {
+ unsigned long parent_rate;
+
parent = clk_core_get_parent_by_index(core, i);
if (!parent)
continue;
if (core->flags & CLK_SET_RATE_PARENT) {
- parent_req = *req;
- ret = __clk_determine_rate(parent->hw, &parent_req);
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
+ ret = clk_core_round_rate_nolock(parent, &parent_req);
if (ret)
continue;
+
+ parent_rate = parent_req.rate;
} else {
- parent_req.rate = clk_core_get_rate_nolock(parent);
+ parent_rate = clk_core_get_rate_nolock(parent);
}
- if (mux_is_better_rate(req->rate, parent_req.rate,
+ if (mux_is_better_rate(req->rate, parent_rate,
best, flags)) {
best_parent = parent;
- best = parent_req.rate;
+ best = parent_rate;
}
}
@@ -625,6 +684,22 @@ static void clk_core_get_boundaries(struct clk_core *core,
*max_rate = min(*max_rate, clk_user->max_rate);
}
+/**
+ * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
+ * @hw: the hw clk we want to get the range from
+ * @min_rate: pointer to the variable that will hold the minimum
+ * @max_rate: pointer to the variable that will hold the maximum
+ *
+ * Fills the @min_rate and @max_rate variables with the minimum and
+ * maximum rates that the clock can reach.
+ */
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate)
+{
+ clk_core_get_boundaries(hw->core, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
+
static bool clk_core_check_boundaries(struct clk_core *core,
unsigned long min_rate,
unsigned long max_rate)
@@ -1340,7 +1415,19 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
if (!core)
return 0;
- req->rate = clamp(req->rate, req->min_rate, req->max_rate);
+ /*
+ * Some clock providers hand-craft their clk_rate_requests and
+ * might not fill min_rate and max_rate.
+ *
+ * If that is the case, clamping the rate is equivalent to setting
+ * the rate to 0, which is bad. Skip the clamping but complain so
+ * that it gets fixed, hopefully.
+ */
+ if (!req->min_rate && !req->max_rate)
+ pr_warn("%s: %s: clk_rate_request has uninitialized min or max rate.\n",
+ __func__, core->name);
+ else
+ req->rate = clamp(req->rate, req->min_rate, req->max_rate);
/*
* At this point, core protection will be disabled
@@ -1367,13 +1454,19 @@ static int clk_core_determine_round_nolock(struct clk_core *core,
}
static void clk_core_init_rate_req(struct clk_core * const core,
- struct clk_rate_request *req)
+ struct clk_rate_request *req,
+ unsigned long rate)
{
struct clk_core *parent;
if (WARN_ON(!core || !req))
return;
+ memset(req, 0, sizeof(*req));
+
+ req->rate = rate;
+ clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
+
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
@@ -1384,6 +1477,51 @@ static void clk_core_init_rate_req(struct clk_core * const core,
}
}
+/**
+ * clk_hw_init_rate_request - Initializes a clk_rate_request
+ * @hw: the clk for which we want to submit a rate request
+ * @req: the clk_rate_request structure we want to initialize
+ * @rate: the rate to request
+ *
+ * Initializes a clk_rate_request structure to submit to
+ * __clk_determine_rate() or similar functions.
+ */
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate)
+{
+ if (WARN_ON(!hw || !req))
+ return;
+
+ clk_core_init_rate_req(hw->core, req, rate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
+
+/**
+ * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
+ * @hw: the original clock that got the rate request
+ * @old_req: the original clk_rate_request structure we want to forward
+ * @parent: the clk we want to forward @old_req to
+ * @req: the clk_rate_request structure we want to initialize
+ * @parent_rate: the rate to request from @parent
+ *
+ * Initializes a clk_rate_request structure to submit to a clock parent
+ * in __clk_determine_rate() or similar functions.
+ */
+void clk_hw_forward_rate_request(const struct clk_hw *hw,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate)
+{
+ if (WARN_ON(!hw || !old_req || !parent || !req))
+ return;
+
+ clk_core_forward_rate_req(hw->core, old_req,
+ parent->core, req,
+ parent_rate);
+}
+
static bool clk_core_can_round(struct clk_core * const core)
{
return core->ops->determine_rate || core->ops->round_rate;
@@ -1392,6 +1530,8 @@ static bool clk_core_can_round(struct clk_core * const core)
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
+ int ret;
+
lockdep_assert_held(&prepare_lock);
if (!core) {
@@ -1399,12 +1539,22 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
return 0;
}
- clk_core_init_rate_req(core, req);
-
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
- else if (core->flags & CLK_SET_RATE_PARENT)
- return clk_core_round_rate_nolock(core->parent, req);
+
+ if (core->flags & CLK_SET_RATE_PARENT) {
+ struct clk_rate_request parent_req;
+
+ clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
+ ret = clk_core_round_rate_nolock(core->parent, &parent_req);
+ if (ret)
+ return ret;
+
+ req->best_parent_rate = parent_req.rate;
+ req->rate = parent_req.rate;
+
+ return 0;
+ }
req->rate = core->rate;
return 0;
@@ -1448,8 +1598,7 @@ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
int ret;
struct clk_rate_request req;
- clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(hw->core, &req, rate);
ret = clk_core_round_rate_nolock(hw->core, &req);
if (ret)
@@ -1481,8 +1630,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
- clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
- req.rate = rate;
+ clk_core_init_rate_req(clk->core, &req, rate);
ret = clk_core_round_rate_nolock(clk->core, &req);
@@ -1611,6 +1759,7 @@ static unsigned long clk_recalc(struct clk_core *core,
/**
* __clk_recalc_rates
* @core: first clk in the subtree
+ * @update_req: Whether req_rate should be updated with the new rate
* @msg: notification type (see include/linux/clk.h)
*
* Walks the subtree of clks starting with clk and recalculates rates as it
@@ -1620,7 +1769,8 @@ static unsigned long clk_recalc(struct clk_core *core,
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
*/
-static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *core, bool update_req,
+ unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
@@ -1634,6 +1784,8 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
parent_rate = core->parent->rate;
core->rate = clk_recalc(core, parent_rate);
+ if (update_req)
+ core->req_rate = core->rate;
/*
* ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
@@ -1643,13 +1795,13 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
__clk_notify(core, msg, old_rate, core->rate);
hlist_for_each_entry(child, &core->children, child_node)
- __clk_recalc_rates(child, msg);
+ __clk_recalc_rates(child, update_req, msg);
}
static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
if (core && (core->flags & CLK_GET_RATE_NOCACHE))
- __clk_recalc_rates(core, 0);
+ __clk_recalc_rates(core, false, 0);
return clk_core_get_rate_nolock(core);
}
@@ -1659,8 +1811,9 @@ static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
+ * is set, which means a recalc_rate will be issued. Can be called regardless of
+ * whether the clock is enabled. If clk is NULL, or if an error occurred, then
+ * returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
@@ -1864,6 +2017,7 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
flags = clk_enable_lock();
clk_reparent(core, old_parent);
clk_enable_unlock(flags);
+
__clk_set_parent_after(core, old_parent, parent);
return ret;
@@ -1969,11 +2123,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
if (clk_core_can_round(core)) {
struct clk_rate_request req;
- req.rate = rate;
- req.min_rate = min_rate;
- req.max_rate = max_rate;
-
- clk_core_init_rate_req(core, &req);
+ clk_core_init_rate_req(core, &req, rate);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
@@ -2172,8 +2322,7 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
if (cnt < 0)
return cnt;
- clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
- req.rate = req_rate;
+ clk_core_init_rate_req(core, &req, req_rate);
ret = clk_core_round_rate_nolock(core, &req);
@@ -2324,19 +2473,15 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
-/**
- * clk_set_rate_range - set a rate range for a clock source
- * @clk: clock source
- * @min: desired minimum clock rate in Hz, inclusive
- * @max: desired maximum clock rate in Hz, inclusive
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+static int clk_set_rate_range_nolock(struct clk *clk,
+ unsigned long min,
+ unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
+ lockdep_assert_held(&prepare_lock);
+
if (!clk)
return 0;
@@ -2349,8 +2494,6 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
return -EINVAL;
}
- clk_prepare_lock();
-
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
@@ -2365,6 +2508,10 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
goto out;
}
+ rate = clk->core->req_rate;
+ if (clk->core->flags & CLK_GET_RATE_NOCACHE)
+ rate = clk_core_get_rate_recalc(clk->core);
+
/*
* Since the boundaries have been changed, let's give the
* opportunity to the provider to adjust the clock rate based on
@@ -2382,7 +2529,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
* - the determine_rate() callback does not really check for
* this corner case when determining the rate
*/
- rate = clamp(clk->core->req_rate, min, max);
+ rate = clamp(rate, min, max);
ret = clk_core_set_rate_nolock(clk->core, rate);
if (ret) {
/* rollback the changes */
@@ -2394,6 +2541,28 @@ out:
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
+ return ret;
+}
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Return: 0 for success or negative errno on failure.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+ int ret;
+
+ if (!clk)
+ return 0;
+
+ clk_prepare_lock();
+
+ ret = clk_set_rate_range_nolock(clk, min, max);
+
clk_prepare_unlock();
return ret;
@@ -2473,7 +2642,7 @@ static void clk_core_reparent(struct clk_core *core,
{
clk_reparent(core, new_parent);
__clk_recalc_accuracies(core);
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
}
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
@@ -2494,27 +2663,13 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(const struct clk *clk, const struct clk *parent)
{
- struct clk_core *core, *parent_core;
- int i;
-
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
return true;
- core = clk->core;
- parent_core = parent->core;
-
- /* Optimize for the case where the parent is already the parent. */
- if (core->parent == parent_core)
- return true;
-
- for (i = 0; i < core->num_parents; i++)
- if (!strcmp(core->parents[i].name, parent_core->name))
- return true;
-
- return false;
+ return clk_core_has_parent(clk->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2571,9 +2726,9 @@ static int clk_core_set_parent_nolock(struct clk_core *core,
/* propagate rate and accuracy recalculation accordingly */
if (ret) {
- __clk_recalc_rates(core, ABORT_RATE_CHANGE);
+ __clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
} else {
- __clk_recalc_rates(core, POST_RATE_CHANGE);
+ __clk_recalc_rates(core, true, POST_RATE_CHANGE);
__clk_recalc_accuracies(core);
}
@@ -3470,7 +3625,7 @@ static void clk_core_reparent_orphans_nolock(void)
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
- __clk_recalc_rates(orphan, 0);
+ __clk_recalc_rates(orphan, true, 0);
/*
* __clk_init_parent() will set the initial req_rate to
@@ -4346,9 +4501,10 @@ void __clk_put(struct clk *clk)
}
hlist_del(&clk->clks_node);
- if (clk->min_rate > clk->core->req_rate ||
- clk->max_rate < clk->core->req_rate)
- clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+ /* If we had any boundaries on that clock, let's drop them. */
+ if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
+ clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
owner = clk->core->owner;
kref_put(&clk->core->ref, __clk_release);
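The KUnit suites that follow exercise the consumer-facing half of these
changes, mostly through clk_set_rate_range() and clk_drop_range(). A minimal
consumer sketch of that API; "my-clk" and the 100-200 MHz bounds are
illustrative, and clk_drop_range() is the in-tree shorthand for
clk_set_rate_range(clk, 0, ULONG_MAX):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_constrain_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "my-clk");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Pin the rate into [100 MHz, 200 MHz]; the core re-evaluates
	 * the current rate against the new boundaries. */
	ret = clk_set_rate_range(clk, 100 * 1000 * 1000, 200 * 1000 * 1000);
	if (ret)
		return ret;

	/* And release the constraint again. */
	return clk_drop_range(clk);
}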
diff --git a/drivers/clk/clk_test.c b/drivers/clk/clk_test.c
index 6731a822f4e3..f9a5c2964c65 100644
--- a/drivers/clk/clk_test.c
+++ b/drivers/clk/clk_test.c
@@ -108,6 +108,39 @@ static const struct clk_ops clk_dummy_single_parent_ops = {
.get_parent = clk_dummy_single_get_parent,
};
+struct clk_multiple_parent_ctx {
+ struct clk_dummy_context parents_ctx[2];
+ struct clk_hw hw;
+ u8 current_parent;
+};
+
+static int clk_multiple_parents_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_multiple_parent_ctx *ctx =
+ container_of(hw, struct clk_multiple_parent_ctx, hw);
+
+ if (index >= clk_hw_get_num_parents(hw))
+ return -EINVAL;
+
+ ctx->current_parent = index;
+
+ return 0;
+}
+
+static u8 clk_multiple_parents_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_multiple_parent_ctx *ctx =
+ container_of(hw, struct clk_multiple_parent_ctx, hw);
+
+ return ctx->current_parent;
+}
+
+static const struct clk_ops clk_multiple_parents_mux_ops = {
+ .get_parent = clk_multiple_parents_mux_get_parent,
+ .set_parent = clk_multiple_parents_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate_closest,
+};
+
static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
struct clk_dummy_context *ctx;
@@ -160,12 +193,14 @@ static void clk_test_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, ctx->rate);
+
+ clk_put(clk);
}
/*
@@ -179,7 +214,7 @@ static void clk_test_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -189,6 +224,8 @@ static void clk_test_set_get_rate(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
}
/*
@@ -202,7 +239,7 @@ static void clk_test_set_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -216,6 +253,8 @@ static void clk_test_set_set_get_rate(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -226,7 +265,7 @@ static void clk_test_round_set_get_rate(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rounded_rate, set_rate;
rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
@@ -240,6 +279,8 @@ static void clk_test_round_set_get_rate(struct kunit *test)
set_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, set_rate, 0);
KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
+
+ clk_put(clk);
}
static struct kunit_case clk_test_cases[] = {
@@ -250,6 +291,11 @@ static struct kunit_case clk_test_cases[] = {
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate API with simple scenarios
+ */
static struct kunit_suite clk_test_suite = {
.name = "clk-test",
.init = clk_test_init,
@@ -257,11 +303,912 @@ static struct kunit_suite clk_test_suite = {
.test_cases = clk_test_cases,
};
+static int clk_uncached_test_init(struct kunit *test)
+{
+ struct clk_dummy_context *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->rate = DUMMY_CLOCK_INIT_RATE;
+ ctx->hw.init = CLK_HW_INIT_NO_PARENT("test-clk",
+ &clk_dummy_rate_ops,
+ CLK_GET_RATE_NOCACHE);
+
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Test that for an uncached clock, the clock framework doesn't cache
+ * the rate and clk_get_rate() will return the underlying clock rate
+ * even if it changed.
+ */
+static void clk_test_uncached_get_rate(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
+
+ /* We change the rate behind the clock framework's back */
+ ctx->rate = DUMMY_CLOCK_RATE_1;
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for an uncached clock, clk_set_rate_range() will work
+ * properly if the rate hasn't changed.
+ */
+static void clk_test_uncached_set_range(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(clk,
+ DUMMY_CLOCK_RATE_1,
+ DUMMY_CLOCK_RATE_2),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for an uncached clock, clk_set_rate_range() will work
+ * properly if the rate has changed in hardware.
+ *
+ * In this case, it means that if the rate wasn't initially in the range
+ * we're trying to set, but got changed at some point into the range
+ * without the kernel knowing about it, its rate shouldn't be affected.
+ */
+static void clk_test_uncached_updated_rate_set_range(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ /* We change the rate behind the clock framework's back */
+ ctx->rate = DUMMY_CLOCK_RATE_1 + 1000;
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(clk,
+ DUMMY_CLOCK_RATE_1,
+ DUMMY_CLOCK_RATE_2),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+
+ clk_put(clk);
+}
+
+static struct kunit_case clk_uncached_test_cases[] = {
+ KUNIT_CASE(clk_test_uncached_get_rate),
+ KUNIT_CASE(clk_test_uncached_set_range),
+ KUNIT_CASE(clk_test_uncached_updated_rate_set_range),
+ {}
+};
+
+/*
+ * Test suite for a basic, uncached, rate clock, without any parent.
+ *
+ * These tests exercise the rate API with simple scenarios
+ */
+static struct kunit_suite clk_uncached_test_suite = {
+ .name = "clk-uncached-test",
+ .init = clk_uncached_test_init,
+ .exit = clk_test_exit,
+ .test_cases = clk_uncached_test_cases,
+};
+
+static int
+clk_multiple_parents_mux_test_init(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx;
+ const char *parents[2] = { "parent-0", "parent-1" };
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+ ret = clk_hw_register(NULL, &ctx->parents_ctx[0].hw);
+ if (ret)
+ return ret;
+
+ ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
+ ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->current_parent = 0;
+ ctx->hw.init = CLK_HW_INIT_PARENTS("test-mux", parents,
+ &clk_multiple_parents_mux_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_multiple_parents_mux_test_exit(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parents_ctx[0].hw);
+ clk_hw_unregister(&ctx->parents_ctx[1].hw);
+}
+
+/*
+ * Test that for a clock with multiple parents, clk_get_parent()
+ * actually returns the current one.
+ */
+static void
+clk_test_multiple_parents_mux_get_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock with multiple parents, clk_has_parent()
+ * actually reports all of them as parents.
+ */
+static void
+clk_test_multiple_parents_mux_has_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+ KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
+ clk_put(parent);
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
+ clk_put(parent);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock with multiple parents, if we set a range on
+ * that clock and the parent is changed, its rate after the reparenting
+ * is still within the range we asked for.
+ *
+ * FIXME: clk_set_parent() only does the reparenting but doesn't
+ * reevaluate whether the new clock rate is within its boundaries or
+ * not.
+ */
+static void
+clk_test_multiple_parents_mux_set_range_set_parent_get_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent1, *parent2;
+ unsigned long rate;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ parent1 = clk_hw_get_clk(&ctx->parents_ctx[0].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent1);
+ KUNIT_ASSERT_TRUE(test, clk_is_match(clk_get_parent(clk), parent1));
+
+ parent2 = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent2);
+
+ ret = clk_set_rate(parent1, DUMMY_CLOCK_RATE_1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate(parent2, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk,
+ DUMMY_CLOCK_RATE_1 - 1000,
+ DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_parent(clk, parent2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+
+ clk_put(parent2);
+ clk_put(parent1);
+ clk_put(clk);
+}
+
+static struct kunit_case clk_multiple_parents_mux_test_cases[] = {
+ KUNIT_CASE(clk_test_multiple_parents_mux_get_parent),
+ KUNIT_CASE(clk_test_multiple_parents_mux_has_parent),
+ KUNIT_CASE(clk_test_multiple_parents_mux_set_range_set_parent_get_rate),
+ {}
+};
+
+/*
+ * Test suite for a basic mux clock with two parents, with
+ * CLK_SET_RATE_PARENT on the child.
+ *
+ * These tests exercise the consumer API and check that the state of the
+ * child and parents are sane and consistent.
+ */
+static struct kunit_suite
+clk_multiple_parents_mux_test_suite = {
+ .name = "clk-multiple-parents-mux-test",
+ .init = clk_multiple_parents_mux_test_init,
+ .exit = clk_multiple_parents_mux_test_exit,
+ .test_cases = clk_multiple_parents_mux_test_cases,
+};
+
+static int
+clk_orphan_transparent_multiple_parent_mux_test_init(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx;
+ const char *parents[2] = { "missing-parent", "proper-parent" };
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("proper-parent",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->parents_ctx[1].rate = DUMMY_CLOCK_INIT_RATE;
+ ret = clk_hw_register(NULL, &ctx->parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init = CLK_HW_INIT_PARENTS("test-orphan-mux", parents,
+ &clk_multiple_parents_mux_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_orphan_transparent_multiple_parent_mux_test_exit(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parents_ctx[1].hw);
+}
+
+/*
+ * Test that, for a mux whose current parent hasn't been registered yet and is
+ * thus orphan, clk_get_parent() will return NULL.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_get_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+
+ KUNIT_EXPECT_PTR_EQ(test, clk_get_parent(clk), NULL);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux whose current parent hasn't been registered yet,
+ * calling clk_set_parent() to a valid parent will properly update the
+ * mux parent and its orphan status.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent, *new_parent;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_parent = clk_get_parent(clk);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_parent);
+ KUNIT_EXPECT_TRUE(test, clk_is_match(parent, new_parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_drop_range() on the mux won't affect the parent
+ * rate.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long parent_rate, new_parent_rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_drop_range(clk);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_parent_rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, new_parent_rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, the rate of the mux and its new parent are consistent.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long parent_rate, rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, rate);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_put() on the mux won't affect the parent rate.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_put(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk *clk, *parent;
+ unsigned long parent_rate, new_parent_rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ clk = clk_hw_get_clk(&ctx->hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ clk_put(clk);
+
+ new_parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, new_parent_rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
+
+ clk_put(parent);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_set_rate_range() will affect the parent state if
+ * its rate is out of range.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan but got switched to a valid
+ * parent, calling clk_set_rate_range() won't affect the parent state if
+ * its rate is within range.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long parent_rate, new_parent_rate;
+ int ret;
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, parent_rate, 0);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk,
+ DUMMY_CLOCK_INIT_RATE - 1000,
+ DUMMY_CLOCK_INIT_RATE + 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_parent_rate = clk_get_rate(parent);
+ KUNIT_ASSERT_GT(test, new_parent_rate, 0);
+ KUNIT_EXPECT_EQ(test, parent_rate, new_parent_rate);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux whose current parent hasn't been registered yet,
+ * calling clk_set_rate_range() will succeed, and will be taken into
+ * account when rounding a rate.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+ int ret;
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a mux that started orphan, was assigned a rate and
+ * then got switched to a valid parent, its rate is eventually within
+ * range.
+ *
+ * FIXME: Even though we update the rate as part of clk_set_parent(), we
+ * don't evaluate whether that new rate is within range and needs to be
+ * adjusted.
+ */
+static void
+clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate(struct kunit *test)
+{
+ struct clk_multiple_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ clk_hw_set_rate_range(hw, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+
+ parent = clk_hw_get_clk(&ctx->parents_ctx[1].hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, parent);
+
+ ret = clk_set_parent(clk, parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+static struct kunit_case clk_orphan_transparent_multiple_parent_mux_test_cases[] = {
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_get_parent),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_drop_range),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_get_rate),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_put),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_modified),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_parent_set_range_untouched),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_round_rate),
+ KUNIT_CASE(clk_test_orphan_transparent_multiple_parent_mux_set_range_set_parent_get_rate),
+ {}
+};
+
+/*
+ * Test suite for a basic mux clock with two parents. The default parent
+ * isn't registered, only the second parent is. By default, the clock
+ * will thus be orphan.
+ *
+ * These tests exercise the behaviour of the consumer API when dealing
+ * with an orphan clock, and how we deal with the transition to a valid
+ * parent.
+ */
+static struct kunit_suite clk_orphan_transparent_multiple_parent_mux_test_suite = {
+ .name = "clk-orphan-transparent-multiple-parent-mux-test",
+ .init = clk_orphan_transparent_multiple_parent_mux_test_init,
+ .exit = clk_orphan_transparent_multiple_parent_mux_test_exit,
+ .test_cases = clk_orphan_transparent_multiple_parent_mux_test_cases,
+};
+
struct clk_single_parent_ctx {
struct clk_dummy_context parent_ctx;
struct clk_hw hw;
};
+static int clk_single_parent_mux_test_init(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
+ ctx->parent_ctx.hw.init =
+ CLK_HW_INIT_NO_PARENT("parent-clk",
+ &clk_dummy_rate_ops,
+ 0);
+
+ ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init = CLK_HW_INIT("test-clk", "parent-clk",
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_single_parent_mux_test_exit(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parent_ctx.hw);
+}
+
+/*
+ * Test that for a clock with a single parent, clk_get_parent() actually
+ * returns the parent.
+ */
+static void
+clk_test_single_parent_mux_get_parent(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
+
+ KUNIT_EXPECT_TRUE(test, clk_is_match(clk_get_parent(clk), parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock with a single parent, clk_has_parent() actually
+ * reports it as a parent.
+ */
+static void
+clk_test_single_parent_mux_has_parent(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent = clk_hw_get_clk(&ctx->parent_ctx.hw, NULL);
+
+ KUNIT_EXPECT_TRUE(test, clk_has_parent(clk, parent));
+
+ clk_put(parent);
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set disjoint ranges on the parent and then the child,
+ * the second will return an error.
+ *
+ * FIXME: clk_set_rate_range() only considers the current clock when
+ * evaluating whether ranges are disjoint and not the upstream clocks'
+ * ranges.
+ */
+static void
+clk_test_single_parent_mux_set_range_disjoint_child_last(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, 1000, 2000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, 3000, 4000);
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set disjoint ranges on the child and then the parent,
+ * the second will return an error.
+ *
+ * FIXME: clk_set_rate_range() only considers the current clock when
+ * evaluating whether ranges are disjoint and not the downstream clocks'
+ * ranges.
+ */
+static void
+clk_test_single_parent_mux_set_range_disjoint_parent_last(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ int ret;
+
+ kunit_skip(test, "This needs to be fixed in the core.");
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(clk, 1000, 2000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(parent, 3000, 4000);
+ KUNIT_EXPECT_LT(test, ret, 0);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set a range on the parent and then call
+ * clk_round_rate(), the boundaries of the parent are taken into
+ * account.
+ */
+static void
+clk_test_single_parent_mux_set_range_round_rate_parent_only(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that for a clock that can't modify its rate and with a single
+ * parent, if we set a range on the parent and a more restrictive one on
+ * the child, and then call clk_round_rate(), the boundaries of the
+ * two clocks are taken into account.
+ */
+static void
+clk_test_single_parent_mux_set_range_round_rate_child_smaller(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a clock that can't modify its rate and has a single
+ * parent, if we set a range on the child and a more restrictive one on
+ * the parent, and then call clk_round_rate(), the boundaries of the
+ * two clocks are taken into account.
+ */
+static void
+clk_test_single_parent_mux_set_range_round_rate_parent_smaller(struct kunit *test)
+{
+ struct clk_single_parent_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *parent;
+ unsigned long rate;
+ int ret;
+
+ parent = clk_get_parent(clk);
+ KUNIT_ASSERT_PTR_NE(test, parent, NULL);
+
+ ret = clk_set_rate_range(parent, DUMMY_CLOCK_RATE_1 + 1000, DUMMY_CLOCK_RATE_2 - 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = clk_set_rate_range(clk, DUMMY_CLOCK_RATE_1, DUMMY_CLOCK_RATE_2);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1 + 1000);
+ KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2 - 1000);
+
+ clk_put(clk);
+}
+
+static struct kunit_case clk_single_parent_mux_test_cases[] = {
+ KUNIT_CASE(clk_test_single_parent_mux_get_parent),
+ KUNIT_CASE(clk_test_single_parent_mux_has_parent),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_child_last),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_disjoint_parent_last),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_child_smaller),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_only),
+ KUNIT_CASE(clk_test_single_parent_mux_set_range_round_rate_parent_smaller),
+ {}
+};
+
+/*
+ * Test suite for a basic mux clock with one parent, with
+ * CLK_SET_RATE_PARENT on the child.
+ *
+ * These tests exercise the consumer API and check that the states of
+ * the child and the parent are sane and consistent.
+ */
+static struct kunit_suite
+clk_single_parent_mux_test_suite = {
+ .name = "clk-single-parent-mux-test",
+ .init = clk_single_parent_mux_test_init,
+ .exit = clk_single_parent_mux_test_exit,
+ .test_cases = clk_single_parent_mux_test_cases,
+};
+
static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
{
struct clk_single_parent_ctx *ctx;
@@ -298,23 +1245,18 @@ static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test
return 0;
}
-static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test)
-{
- struct clk_single_parent_ctx *ctx = test->priv;
-
- clk_hw_unregister(&ctx->hw);
- clk_hw_unregister(&ctx->parent_ctx.hw);
-}
-
/*
* Test that a mux-only clock, with an initial rate within a range,
* will still have the same rate after the range has been enforced.
+ *
+ * See:
+ * https://lore.kernel.org/linux-clk/7720158d-10a7-a17b-73a4-a8615c9c6d5c@collabora.com/
*/
static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
{
struct clk_single_parent_ctx *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate, new_rate;
rate = clk_get_rate(clk);
@@ -329,6 +1271,8 @@ static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
new_rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, new_rate, 0);
KUNIT_EXPECT_EQ(test, rate, new_rate);
+
+ clk_put(clk);
}
static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
@@ -336,13 +1280,151 @@ static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] =
{}
};
+/*
+ * Test suite for a basic mux clock with one parent. The parent is
+ * registered after its child. The clock will thus be an orphan when
+ * registered, but will no longer be when the tests run.
+ *
+ * These tests make sure a clock that used to be an orphan has a sane,
+ * consistent behaviour.
+ */
static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
.name = "clk-orphan-transparent-single-parent-test",
.init = clk_orphan_transparent_single_parent_mux_test_init,
- .exit = clk_orphan_transparent_single_parent_mux_test_exit,
+ .exit = clk_single_parent_mux_test_exit,
.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};
+struct clk_single_parent_two_lvl_ctx {
+ struct clk_dummy_context parent_parent_ctx;
+ struct clk_dummy_context parent_ctx;
+ struct clk_hw hw;
+};
+
+static int
+clk_orphan_two_level_root_last_test_init(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx;
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->parent_ctx.hw.init =
+ CLK_HW_INIT("intermediate-parent",
+ "root-parent",
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init =
+ CLK_HW_INIT("test-clk", "intermediate-parent",
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ ctx->parent_parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;
+ ctx->parent_parent_ctx.hw.init =
+ CLK_HW_INIT_NO_PARENT("root-parent",
+ &clk_dummy_rate_ops,
+ 0);
+ ret = clk_hw_register(NULL, &ctx->parent_parent_ctx.hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+clk_orphan_two_level_root_last_test_exit(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->parent_ctx.hw);
+ clk_hw_unregister(&ctx->parent_parent_ctx.hw);
+}
+
+/*
+ * Test that, for a clock whose parent used to be an orphan,
+ * clk_get_rate() will return the proper rate.
+ */
+static void
+clk_orphan_two_level_root_last_test_get_rate(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+
+ rate = clk_get_rate(clk);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
+
+ clk_put(clk);
+}
+
+/*
+ * Test that, for a clock whose parent used to be an orphan,
+ * clk_set_rate_range() won't affect its rate if it is already within
+ * range.
+ *
+ * See (for Exynos 4210):
+ * https://lore.kernel.org/linux-clk/366a0232-bb4a-c357-6aa8-636e398e05eb@samsung.com/
+ */
+static void
+clk_orphan_two_level_root_last_test_set_range(struct kunit *test)
+{
+ struct clk_single_parent_two_lvl_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ unsigned long rate;
+ int ret;
+
+ ret = clk_set_rate_range(clk,
+ DUMMY_CLOCK_INIT_RATE - 1000,
+ DUMMY_CLOCK_INIT_RATE + 1000);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_INIT_RATE);
+
+ clk_put(clk);
+}
+
+static struct kunit_case
+clk_orphan_two_level_root_last_test_cases[] = {
+ KUNIT_CASE(clk_orphan_two_level_root_last_test_get_rate),
+ KUNIT_CASE(clk_orphan_two_level_root_last_test_set_range),
+ {}
+};
+
+/*
+ * Test suite for a basic, transparent clock with a parent that is also
+ * such a clock. The intermediate parent is registered first, then the
+ * leaf child, and the root parent last. The intermediate and leaf
+ * clocks will thus be orphans when registered, but the leaf clock
+ * itself will always keep its parent and will never be reparented:
+ * it is only an orphan because its parent is.
+ *
+ * These tests exercise the behaviour of the consumer API when dealing
+ * with an orphan clock, and how we deal with the transition to a valid
+ * parent.
+ */
+static struct kunit_suite
+clk_orphan_two_level_root_last_test_suite = {
+ .name = "clk-orphan-two-level-root-last-test",
+ .init = clk_orphan_two_level_root_last_test_init,
+ .exit = clk_orphan_two_level_root_last_test_exit,
+ .test_cases = clk_orphan_two_level_root_last_test_cases,
+};
+
/*
* Test that clk_set_rate_range won't return an error for a valid range
* and that it will make sure the rate of the clock is within the
@@ -352,7 +1434,7 @@ static void clk_range_test_set_range(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -365,6 +1447,8 @@ static void clk_range_test_set_range(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -375,13 +1459,15 @@ static void clk_range_test_set_range_invalid(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
KUNIT_EXPECT_LT(test,
clk_set_rate_range(clk,
DUMMY_CLOCK_RATE_1 + 1000,
DUMMY_CLOCK_RATE_1),
0);
+
+ clk_put(clk);
}
/*
@@ -420,7 +1506,7 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
@@ -433,6 +1519,8 @@ static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -443,7 +1531,7 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -460,6 +1548,8 @@ static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -472,7 +1562,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
@@ -489,6 +1579,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kuni
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
+
+ clk_put(clk);
}
/*
@@ -499,7 +1591,7 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rate;
KUNIT_ASSERT_EQ(test,
@@ -512,6 +1604,8 @@ static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -522,7 +1616,7 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -539,6 +1633,8 @@ static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -551,7 +1647,7 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
long rounded;
KUNIT_ASSERT_EQ(test,
@@ -568,6 +1664,8 @@ static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kun
0);
KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
+
+ clk_put(clk);
}
/*
@@ -582,7 +1680,7 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -598,6 +1696,8 @@ static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
}
/*
@@ -612,7 +1712,7 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -628,6 +1728,8 @@ static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
static struct kunit_case clk_range_test_cases[] = {
@@ -645,6 +1747,12 @@ static struct kunit_case clk_range_test_cases[] = {
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range().
+ */
static struct kunit_suite clk_range_test_suite = {
.name = "clk-range-test",
.init = clk_test_init,
@@ -664,7 +1772,7 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -700,6 +1808,8 @@ static void clk_range_test_set_range_rate_maximized(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(clk);
}
/*
@@ -714,7 +1824,7 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
@@ -758,14 +1868,79 @@ static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
clk_put(user2);
clk_put(user1);
+ clk_put(clk);
+}
+
+/*
+ * Test that if we have several successive calls to
+ * clk_set_rate_range() across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drops its clock.
+ *
+ * With clk_dummy_maximize_rate_ops, this means that the rate will
+ * trail along the maximum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *user1, *user2;
+ unsigned long rate;
+
+ user1 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+ user2 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
+ 0);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user1,
+ 0,
+ DUMMY_CLOCK_RATE_2),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user2,
+ 0,
+ DUMMY_CLOCK_RATE_1),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(user2);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(user1);
+ clk_put(clk);
}
static struct kunit_case clk_range_maximize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_maximized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
+ KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
+ * driver that will always try to run at the highest possible rate.
+ */
static struct kunit_suite clk_range_maximize_test_suite = {
.name = "clk-range-maximize-test",
.init = clk_maximize_test_init,
@@ -785,7 +1960,7 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
unsigned long rate;
KUNIT_ASSERT_EQ(test,
@@ -821,6 +1996,8 @@ static void clk_range_test_set_range_rate_minimized(struct kunit *test)
rate = clk_get_rate(clk);
KUNIT_ASSERT_GT(test, rate, 0);
KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(clk);
}
/*
@@ -835,7 +2012,7 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
{
struct clk_dummy_context *ctx = test->priv;
struct clk_hw *hw = &ctx->hw;
- struct clk *clk = hw->clk;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
struct clk *user1, *user2;
unsigned long rate;
@@ -875,14 +2052,75 @@ static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
clk_put(user2);
clk_put(user1);
+ clk_put(clk);
+}
+
+/*
+ * Test that if we have several successive calls to
+ * clk_set_rate_range() across multiple users, the core will reevaluate
+ * whether a new rate is needed, including when a user drops its clock.
+ *
+ * With clk_dummy_minimize_rate_ops, this means that the rate will
+ * trail along the minimum as it evolves.
+ */
+static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
+{
+ struct clk_dummy_context *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *user1, *user2;
+ unsigned long rate;
+
+ user1 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);
+
+ user2 = clk_hw_get_clk(hw, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user1,
+ DUMMY_CLOCK_RATE_1,
+ ULONG_MAX),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ KUNIT_ASSERT_EQ(test,
+ clk_set_rate_range(user2,
+ DUMMY_CLOCK_RATE_2,
+ ULONG_MAX),
+ 0);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(user2);
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_GT(test, rate, 0);
+ KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_put(user1);
+ clk_put(clk);
}
static struct kunit_case clk_range_minimize_test_cases[] = {
KUNIT_CASE(clk_range_test_set_range_rate_minimized),
KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
+ KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
{}
};
+/*
+ * Test suite for a basic rate clock, without any parent.
+ *
+ * These tests exercise the rate range API: clk_set_rate_range(),
+ * clk_set_min_rate(), clk_set_max_rate(), clk_drop_range(), with a
+ * driver that will always try to run at the lowest possible rate.
+ */
static struct kunit_suite clk_range_minimize_test_suite = {
.name = "clk-range-minimize-test",
.init = clk_minimize_test_init,
@@ -890,11 +2128,284 @@ static struct kunit_suite clk_range_minimize_test_suite = {
.test_cases = clk_range_minimize_test_cases,
};
+struct clk_leaf_mux_ctx {
+ struct clk_multiple_parent_ctx mux_ctx;
+ struct clk_hw hw;
+};
+
+static int
+clk_leaf_mux_set_rate_parent_test_init(struct kunit *test)
+{
+ struct clk_leaf_mux_ctx *ctx;
+ const char *top_parents[2] = { "parent-0", "parent-1" };
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+
+ ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.current_parent = 0;
+ ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
+ &clk_multiple_parents_mux_ops,
+ 0);
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->hw.init = CLK_HW_INIT_HW("test-clock", &ctx->mux_ctx.hw,
+ &clk_dummy_single_parent_ops,
+ CLK_SET_RATE_PARENT);
+ ret = clk_hw_register(NULL, &ctx->hw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void clk_leaf_mux_set_rate_parent_test_exit(struct kunit *test)
+{
+ struct clk_leaf_mux_ctx *ctx = test->priv;
+
+ clk_hw_unregister(&ctx->hw);
+ clk_hw_unregister(&ctx->mux_ctx.hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
+}
+
+/*
+ * Test that, for a clock that will forward any rate request to its
+ * parent, the rate request structure filled in by __clk_determine_rate
+ * is sane and matches what we expect.
+ */
+static void clk_leaf_mux_set_rate_parent_determine_rate(struct kunit *test)
+{
+ struct clk_leaf_mux_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk_rate_request req;
+ unsigned long rate;
+ int ret;
+
+ rate = clk_get_rate(clk);
+ KUNIT_ASSERT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
+
+ clk_hw_init_rate_request(hw, &req, DUMMY_CLOCK_RATE_2);
+
+ ret = __clk_determine_rate(hw, &req);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, req.rate, DUMMY_CLOCK_RATE_2);
+ KUNIT_EXPECT_EQ(test, req.best_parent_rate, DUMMY_CLOCK_RATE_2);
+ KUNIT_EXPECT_PTR_EQ(test, req.best_parent_hw, &ctx->mux_ctx.hw);
+
+ clk_put(clk);
+}
+
+static struct kunit_case clk_leaf_mux_set_rate_parent_test_cases[] = {
+ KUNIT_CASE(clk_leaf_mux_set_rate_parent_determine_rate),
+ {}
+};
+
+/*
+ * Test suite for a clock whose parent is a mux with multiple parents.
+ * The leaf clock has CLK_SET_RATE_PARENT, and will forward rate
+ * requests to the mux, which will then select which parent is the best
+ * fit for a given rate.
+ *
+ * These tests exercise the behaviour of muxes, and the proper selection
+ * of parents.
+ */
+static struct kunit_suite clk_leaf_mux_set_rate_parent_test_suite = {
+ .name = "clk-leaf-mux-set-rate-parent",
+ .init = clk_leaf_mux_set_rate_parent_test_init,
+ .exit = clk_leaf_mux_set_rate_parent_test_exit,
+ .test_cases = clk_leaf_mux_set_rate_parent_test_cases,
+};
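
The leaf clock in this suite forwards every rate request to its parent. A provider typically achieves that with a .determine_rate callback along these lines (a hedged sketch; the clk_dummy_single_parent_ops used by these tests may be implemented differently):

static int example_fwd_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (!parent)
		return -EINVAL;

	/* Hand the request to the parent unchanged. */
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}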
+
+struct clk_mux_notifier_rate_change {
+ bool done;
+ unsigned long old_rate;
+ unsigned long new_rate;
+ wait_queue_head_t wq;
+};
+
+struct clk_mux_notifier_ctx {
+ struct clk_multiple_parent_ctx mux_ctx;
+ struct clk *clk;
+ struct notifier_block clk_nb;
+ struct clk_mux_notifier_rate_change pre_rate_change;
+ struct clk_mux_notifier_rate_change post_rate_change;
+};
+
+#define NOTIFIER_TIMEOUT_MS 100
+
+static int clk_mux_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct clk_notifier_data *clk_data = data;
+ struct clk_mux_notifier_ctx *ctx = container_of(nb,
+ struct clk_mux_notifier_ctx,
+ clk_nb);
+
+ if (action & PRE_RATE_CHANGE) {
+ ctx->pre_rate_change.old_rate = clk_data->old_rate;
+ ctx->pre_rate_change.new_rate = clk_data->new_rate;
+ ctx->pre_rate_change.done = true;
+ wake_up_interruptible(&ctx->pre_rate_change.wq);
+ }
+
+ if (action & POST_RATE_CHANGE) {
+ ctx->post_rate_change.old_rate = clk_data->old_rate;
+ ctx->post_rate_change.new_rate = clk_data->new_rate;
+ ctx->post_rate_change.done = true;
+ wake_up_interruptible(&ctx->post_rate_change.wq);
+ }
+
+ return 0;
+}
+
+static int clk_mux_notifier_test_init(struct kunit *test)
+{
+ struct clk_mux_notifier_ctx *ctx;
+ const char *top_parents[2] = { "parent-0", "parent-1" };
+ int ret;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ test->priv = ctx;
+ ctx->clk_nb.notifier_call = clk_mux_notifier_callback;
+ init_waitqueue_head(&ctx->pre_rate_change.wq);
+ init_waitqueue_head(&ctx->post_rate_change.wq);
+
+ ctx->mux_ctx.parents_ctx[0].hw.init = CLK_HW_INIT_NO_PARENT("parent-0",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[0].rate = DUMMY_CLOCK_RATE_1;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[0].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.parents_ctx[1].hw.init = CLK_HW_INIT_NO_PARENT("parent-1",
+ &clk_dummy_rate_ops,
+ 0);
+ ctx->mux_ctx.parents_ctx[1].rate = DUMMY_CLOCK_RATE_2;
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.parents_ctx[1].hw);
+ if (ret)
+ return ret;
+
+ ctx->mux_ctx.current_parent = 0;
+ ctx->mux_ctx.hw.init = CLK_HW_INIT_PARENTS("test-mux", top_parents,
+ &clk_multiple_parents_mux_ops,
+ 0);
+ ret = clk_hw_register(NULL, &ctx->mux_ctx.hw);
+ if (ret)
+ return ret;
+
+ ctx->clk = clk_hw_get_clk(&ctx->mux_ctx.hw, NULL);
+ ret = clk_notifier_register(ctx->clk, &ctx->clk_nb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void clk_mux_notifier_test_exit(struct kunit *test)
+{
+ struct clk_mux_notifier_ctx *ctx = test->priv;
+ struct clk *clk = ctx->clk;
+
+ clk_notifier_unregister(clk, &ctx->clk_nb);
+ clk_put(clk);
+
+ clk_hw_unregister(&ctx->mux_ctx.hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[0].hw);
+ clk_hw_unregister(&ctx->mux_ctx.parents_ctx[1].hw);
+}
+
+/*
+ * Test that if we have a notifier registered on a mux, the core will
+ * notify us when we switch to another parent, with the proper old and
+ * new rates.
+ */
+static void clk_mux_notifier_set_parent_test(struct kunit *test)
+{
+ struct clk_mux_notifier_ctx *ctx = test->priv;
+ struct clk_hw *hw = &ctx->mux_ctx.hw;
+ struct clk *clk = clk_hw_get_clk(hw, NULL);
+ struct clk *new_parent = clk_hw_get_clk(&ctx->mux_ctx.parents_ctx[1].hw, NULL);
+ int ret;
+
+ ret = clk_set_parent(clk, new_parent);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = wait_event_interruptible_timeout(ctx->pre_rate_change.wq,
+ ctx->pre_rate_change.done,
+ msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_EQ(test, ctx->pre_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
+
+ ret = wait_event_interruptible_timeout(ctx->post_rate_change.wq,
+ ctx->post_rate_change.done,
+ msecs_to_jiffies(NOTIFIER_TIMEOUT_MS));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, ctx->post_rate_change.old_rate, DUMMY_CLOCK_RATE_1);
+ KUNIT_EXPECT_EQ(test, ctx->post_rate_change.new_rate, DUMMY_CLOCK_RATE_2);
+
+ clk_put(new_parent);
+ clk_put(clk);
+}
+
+static struct kunit_case clk_mux_notifier_test_cases[] = {
+ KUNIT_CASE(clk_mux_notifier_set_parent_test),
+ {}
+};
+
+/*
+ * Test suite for a mux with multiple parents, and a notifier registered
+ * on the mux.
+ *
+ * These tests exercise the behaviour of notifiers.
+ */
+static struct kunit_suite clk_mux_notifier_test_suite = {
+ .name = "clk-mux-notifier",
+ .init = clk_mux_notifier_test_init,
+ .exit = clk_mux_notifier_test_exit,
+ .test_cases = clk_mux_notifier_test_cases,
+};
+
kunit_test_suites(
+ &clk_leaf_mux_set_rate_parent_test_suite,
&clk_test_suite,
+ &clk_multiple_parents_mux_test_suite,
+ &clk_mux_notifier_test_suite,
+ &clk_orphan_transparent_multiple_parent_mux_test_suite,
&clk_orphan_transparent_single_parent_test_suite,
+ &clk_orphan_two_level_root_last_test_suite,
&clk_range_test_suite,
&clk_range_maximize_test_suite,
- &clk_range_minimize_test_suite
+ &clk_range_minimize_test_suite,
+ &clk_single_parent_mux_test_suite,
+ &clk_uncached_test_suite
);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 4421e4859257..ba1720b9e231 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -129,9 +129,18 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
return 0;
}
+static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+ return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
+}
+
const struct clk_ops mtk_mux_clr_set_upd_ops = {
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);
@@ -141,6 +150,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.is_enabled = mtk_clk_mux_is_enabled,
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
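
Wiring .determine_rate to clk_mux_determine_rate_flags() is what lets the core reparent these muxes during clk_set_rate() while honouring the mux's own flags. A generic, hedged sketch of the same pattern (hypothetical names; the MediaTek callback above passes mux->data->flags instead of a fixed flag):

static int example_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	/* Let the generic helper pick the best parent for req->rate. */
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}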
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 609c10f8d0d9..76551534f10d 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -915,6 +915,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
req->best_parent_hw = p2;
}
+ clk_hw_get_rate_range(req->best_parent_hw,
+ &parent_req.min_rate, &parent_req.max_rate);
+
+ if (req->min_rate > parent_req.min_rate)
+ parent_req.min_rate = req->min_rate;
+
+ if (req->max_rate < parent_req.max_rate)
+ parent_req.max_rate = req->max_rate;
+
ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
if (ret)
return ret;
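
The hunk above narrows the parent request to the intersection of the parent's advertised range and the caller's limits before delegating to __clk_determine_rate(). In isolation, the clamping step looks like this (a hedged sketch with hypothetical names):

static void example_clamp_parent_req(struct clk_hw *parent_hw,
				     const struct clk_rate_request *req,
				     struct clk_rate_request *parent_req)
{
	/* Start from the parent's own boundaries... */
	clk_hw_get_rate_range(parent_hw, &parent_req->min_rate,
			      &parent_req->max_rate);

	/* ...and narrow them with the child's request. */
	if (req->min_rate > parent_req->min_rate)
		parent_req->min_rate = req->min_rate;
	if (req->max_rate < parent_req->max_rate)
		parent_req->max_rate = req->max_rate;
}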
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index 657e1154bb9b..a9eb6a9ac445 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2767,17 +2767,6 @@ MODULE_DEVICE_TABLE(of, gcc_msm8660_match_table);
static int gcc_msm8660_probe(struct platform_device *pdev)
{
- int ret;
- struct device *dev = &pdev->dev;
-
- ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000);
- if (ret)
- return ret;
-
- ret = qcom_cc_register_board_clk(dev, "pxo_board", "pxo", 27000000);
- if (ret)
- return ret;
-
return qcom_cc_probe(pdev, &gcc_msm8660_desc);
}
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 41717ff707f6..ba8791303156 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 490701ac9e93..c192a9141b86 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -7,6 +7,7 @@
*/
#include <linux/clkdev.h>
+#include <linux/clk/spear.h>
#include <linux/io.h>
#include <linux/spinlock_types.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index f7405a58877e..73303458e886 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1166,6 +1166,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_PWM, TEGRA114_CLK_PLL_P, 408000000, 0 },
/* must be the last entry */
{ TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index a9d4efcef2d4..6c46592d794e 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1330,6 +1330,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_PWM, TEGRA124_CLK_PLL_P, 408000000, 0 },
/* must be the last entry */
{ TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 8a4514f6d503..422d78247553 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -1044,6 +1044,7 @@ static struct tegra_clk_init_table init_table[] = {
{ TEGRA20_CLK_GR2D, TEGRA20_CLK_PLL_C, 300000000, 0 },
{ TEGRA20_CLK_GR3D, TEGRA20_CLK_PLL_C, 300000000, 0 },
{ TEGRA20_CLK_VDE, TEGRA20_CLK_PLL_C, 300000000, 0 },
+ { TEGRA20_CLK_PWM, TEGRA20_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 499f999e91e1..a3488aaac3f7 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3597,6 +3597,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
{ TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
{ TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
+ { TEGRA210_CLK_PWM, TEGRA210_CLK_PLL_P, 48000000, 0 },
/* This MUST be the last entry. */
{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 168c07d5a5f2..60f1534711f1 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1237,6 +1237,7 @@ static struct tegra_clk_init_table init_table[] = {
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
{ TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
+ { TEGRA30_CLK_PWM, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index acf31cc1dbcc..97086fab698e 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -48,7 +48,7 @@ void hmem_register_device(int target_nid, struct resource *r)
rc = platform_device_add_data(pdev, &info, sizeof(info));
if (rc < 0) {
pr_err("hmem memregion_info allocation failure for %pr\n", &res);
- goto out_pdev;
+ goto out_resource;
}
rc = platform_device_add_resources(pdev, &res, 1);
@@ -66,7 +66,7 @@ void hmem_register_device(int target_nid, struct resource *r)
return;
out_resource:
- put_device(&pdev->dev);
+ platform_device_put(pdev);
out_pdev:
memregion_free(id);
}
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 9b5e2a5eb0ae..da4438f3188c 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -363,7 +363,7 @@ static void dax_free_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
if (inode->i_rdev)
- ida_simple_remove(&dax_minor_ida, iminor(inode));
+ ida_free(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
}
@@ -445,7 +445,7 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
if (WARN_ON_ONCE(ops && !ops->zero_page_range))
return ERR_PTR(-EINVAL);
- minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
+ minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL);
if (minor < 0)
return ERR_PTR(-ENOMEM);
@@ -459,7 +459,7 @@ struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
return dax_dev;
err_dev:
- ida_simple_remove(&dax_minor_ida, minor);
+ ida_free(&dax_minor_ida, minor);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
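
The dax changes are part of the tree-wide move from the deprecated ida_simple_get()/ida_simple_remove() wrappers to ida_alloc_max()/ida_free(). Note the bound in the conversion: ida_simple_get() takes an exclusive end (MINORMASK + 1), while ida_alloc_max() takes an inclusive max (MINORMASK). A minimal hedged sketch of the modern API (hypothetical names):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_alloc_id(void)
{
	/* Smallest free ID in [0, 255]; the upper bound is inclusive. */
	return ida_alloc_max(&example_ida, 255, GFP_KERNEL);
}

static void example_free_id(int id)
{
	ida_free(&example_ida, id);
}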
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 9fe2ae794316..ffe621695e47 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -312,7 +312,7 @@ static unsigned long dmatest_random(void)
{
unsigned long buf;
- prandom_bytes(&buf, sizeof(buf));
+ get_random_bytes(&buf, sizeof(buf));
return buf;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 17562cf1fe97..456602d373b7 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -473,7 +473,7 @@ config EDAC_ALTERA_SDMMC
config EDAC_SIFIVE
bool "Sifive platform EDAC driver"
- depends on EDAC=y && SIFIVE_L2
+ depends on EDAC=y && SIFIVE_CCACHE
help
Support for error detection and correction on the SiFive SoCs.
diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c
index ee800aec7d47..b844e2626fd5 100644
--- a/drivers/edac/sifive_edac.c
+++ b/drivers/edac/sifive_edac.c
@@ -2,7 +2,7 @@
/*
* SiFive Platform EDAC Driver
*
- * Copyright (C) 2018-2019 SiFive, Inc.
+ * Copyright (C) 2018-2022 SiFive, Inc.
*
* This driver is partially based on octeon_edac-pc.c
*
@@ -10,7 +10,7 @@
#include <linux/edac.h>
#include <linux/platform_device.h>
#include "edac_module.h"
-#include <soc/sifive/sifive_l2_cache.h>
+#include <soc/sifive/sifive_ccache.h>
#define DRVNAME "sifive_edac"
@@ -32,9 +32,9 @@ int ecc_err_event(struct notifier_block *this, unsigned long event, void *ptr)
p = container_of(this, struct sifive_edac_priv, notifier);
- if (event == SIFIVE_L2_ERR_TYPE_UE)
+ if (event == SIFIVE_CCACHE_ERR_TYPE_UE)
edac_device_handle_ue(p->dci, 0, 0, msg);
- else if (event == SIFIVE_L2_ERR_TYPE_CE)
+ else if (event == SIFIVE_CCACHE_ERR_TYPE_CE)
edac_device_handle_ce(p->dci, 0, 0, msg);
return NOTIFY_OK;
@@ -67,7 +67,7 @@ static int ecc_register(struct platform_device *pdev)
goto err;
}
- register_sifive_l2_error_notifier(&p->notifier);
+ register_sifive_ccache_error_notifier(&p->notifier);
return 0;
@@ -81,7 +81,7 @@ static int ecc_unregister(struct platform_device *pdev)
{
struct sifive_edac_priv *p = platform_get_drvdata(pdev);
- unregister_sifive_l2_error_notifier(&p->notifier);
+ unregister_sifive_ccache_error_notifier(&p->notifier);
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(p->dci);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 9e98f3866edc..03bbfaa51cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -75,9 +75,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
adev->kfd.dev = kgd2kfd_probe(adev, vf);
-
- if (adev->kfd.dev)
- amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -201,6 +198,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
adev_to_drm(adev), &gpu_resources);
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
}
}
@@ -210,6 +209,7 @@ void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
if (adev->kfd.dev) {
kgd2kfd_device_exit(adev->kfd.dev);
adev->kfd.dev = NULL;
+ amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 23998f727c7f..1a06b8d724f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,8 +38,6 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -500,12 +498,6 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
-static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
- .destroy = drm_gem_fb_destroy,
- .create_handle = drm_gem_fb_create_handle,
- .dirty = drm_atomic_helper_dirtyfb,
-};
-
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1108,10 +1100,8 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- if (drm_drv_uses_atomic_modeset(dev))
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
- else
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6a9b9fc9e0b..2e8f6cd7a729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -688,13 +688,16 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
- bp->destroy = &amdgpu_bo_vm_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+ /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+ * is initialized.
+ */
+ bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ccebd8e2a2d8..2dad7aa9a03b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2877,9 +2877,9 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
err_data.err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
sizeof(struct eeprom_table_record), GFP_KERNEL);
- if(!err_data.err_addr) {
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record in mca notifier!\n");
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error record in mca notifier!\n");
return NOTIFY_DONE;
}
@@ -2889,7 +2889,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
if (adev->umc.ras &&
adev->umc.ras->convert_ras_error_address)
adev->umc.ras->convert_ras_error_address(adev,
- &err_data, 0, ch_inst, umc_inst, m->addr);
+ &err_data, m->addr, ch_inst, umc_inst);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 3949b7e3907f..ea5278f094c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -222,8 +222,10 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
adev->sdma.instance[instance].fw->data;
version_major = le16_to_cpu(header->header_version_major);
- if ((duplicate && instance) || (!duplicate && version_major > 1))
- return -EINVAL;
+ if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+ err = -EINVAL;
+ goto out;
+ }
err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
if (err)
@@ -272,7 +274,7 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
break;
default:
- return -EINVAL;
+ err = -EINVAL;
}
}
@@ -283,3 +285,24 @@ out:
}
return err;
}
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue) {
+ sdma = &adev->sdma.instance[i].page;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+ sdma = &adev->sdma.instance[i].ring;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index d2d88279fefb..7d99205c2e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
char *fw_name, u32 instance, bool duplicate);
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b1c455329023..dc262d2c2925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -424,8 +424,9 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
struct amdgpu_res_cursor cursor;
+ u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -434,12 +435,18 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
+ end = cursor.start + cursor.size;
+ while (cursor.remaining) {
+ amdgpu_res_next(&cursor, cursor.size);
- /* ttm_resource_ioremap only supports contiguous memory */
- if (cursor.size != mem_size)
- return false;
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (end != cursor.start)
+ return false;
+
+ end = cursor.start + cursor.size;
+ }
- return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
+ return end <= adev->gmc.visible_vram_size;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 2fb4951a6433..e46439274f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,8 +22,6 @@
#define __AMDGPU_UMC_H__
#include "amdgpu_ras.h"
-#define UMC_INVALID_ADDR 0x1ULL
-
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
@@ -54,9 +52,8 @@ struct amdgpu_umc_ras {
void (*err_cnt_init)(struct amdgpu_device *adev);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
void (*convert_ras_error_address)(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- uint32_t umc_reg_offset, uint32_t ch_inst,
- uint32_t umc_inst, uint64_t mca_addr);
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5647f13b98d4..cbca9866645c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
*/
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6bdffdc1c0b9..c52d246a1d96 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2584fa3cb13e..486d9b5c1b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7241a9fb0121..298fa11702e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].ring;
+ int i;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].page;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -1954,8 +1940,11 @@ static int sdma_v4_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index c05c3eebde4c..d4d9f196db83 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1460,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index f136fec7b4f4..809eca54fc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
- struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
- struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1) ||
- (adev->mman.buffer_funcs_ring == sdma2) ||
- (adev->mman.buffer_funcs_ring == sdma3))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1357,8 +1349,11 @@ static int sdma_v5_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_2_ctx_switch_enable(adev, false);
sdma_v5_2_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index db51230163c5..da3beb0bf2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -846,7 +839,8 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_cntl =
order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
@@ -1317,8 +1311,11 @@ static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f675111ace20..4d5e718540aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
u32 rb_cntl;
unsigned i;
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 16b757664a35..795706b3b092 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -629,6 +629,7 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 939cb203f7ad..f17d297b594b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -327,10 +327,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -343,10 +342,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
+ amdgpu_umc_fill_error_record(err_data, err_addr,
retired_page, channel_index, umc_inst);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index a0d19b768346..5d5d031c9e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint32_t channel_index;
+ uint64_t soc_pa, retired_page, column;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* The umc channel bits are not original values, they are hashed */
+ SET_CHANNEL_HASH(channel_index, soc_pa);
+
+ /* clear [C4 C3 C2] in soc physical address */
+ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+
+ /* shift R14 bit */
+ retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
+}
+
static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -209,42 +240,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -453,81 +457,40 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset, uint32_t ch_inst,
- uint32_t umc_inst, uint64_t mca_addr)
+ uint32_t umc_inst)
{
uint32_t mc_umc_status_addr;
- uint32_t channel_index;
- uint64_t mc_umc_status = 0, mc_umc_addrt0;
- uint64_t err_addr, soc_pa, retired_page, column;
+ uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
- if (mca_addr == UMC_INVALID_ADDR) {
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- mc_umc_addrt0 =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
- if (mc_umc_status == 0)
- return;
+ if (mc_umc_status == 0)
+ return;
- if (!err_data->err_addr) {
- /* clear umc status */
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
- return;
- }
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
}
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
- /* calculate error address if ue/ce error is detected */
- if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) ||
- mca_addr != UMC_INVALID_ADDR) {
- if (mca_addr == UMC_INVALID_ADDR) {
- err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
- err_addr =
- REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- } else {
- err_addr = mca_addr;
- }
+ /* calculate error address if ue error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ err_addr =
+ REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1 ||
- mca_addr != UMC_INVALID_ADDR) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
- if (mca_addr == UMC_INVALID_ADDR)
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -549,7 +512,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
umc_v6_7_query_error_address(adev,
err_data,
umc_reg_offset, ch_inst,
- umc_inst, UMC_INVALID_ADDR);
+ umc_inst);
}
}
@@ -590,5 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
- .convert_ras_error_address = umc_v6_7_query_error_address,
+ .convert_ras_error_address = umc_v6_7_convert_error_address,
};
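A short worked example of the fan-out the new converter performs (bit positions and the map size are illustrative assumptions, not the real UMC_V6_7_* constants): one bad normal address expands into every [C4 C3 C2] column combination, each additionally mirrored across the R14 row bit.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const int c2_bit = 15, r14_bit = 22;		/* assumed positions */
	uint64_t soc_pa = 0x12345678ULL;		/* assumed translated PA */
	uint64_t base = soc_pa & ~(0x7ULL << c2_bit);	/* clear [C4 C3 C2] */
	uint64_t col, page;

	for (col = 0; col < 8; col++) {			/* 8 column combos */
		page = base | (col << c2_bit);
		printf("retire 0x%llx and 0x%llx\n",
		       (unsigned long long)page,
		       (unsigned long long)(page ^ (1ULL << r14_bit)));
	}
	return 0;
}

So a single uncorrectable error retires a pair of candidate pages per column, matching the two amdgpu_umc_fill_error_record() calls per loop iteration above.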
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index a8cbda81828d..91235df54e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -208,7 +208,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
- uint32_t channel_index;
+ uint64_t mc_umc_addrt0, na_err_addr_base;
+ uint64_t na_err_addr, retired_page_addr;
+ uint32_t channel_index, addr_lsb, col = 0;
+ int ret = 0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -229,13 +232,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
umc_inst * adev->umc.channel_inst_num +
ch_inst];
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- uint32_t addr_lsb;
- uint64_t mc_umc_addrt0;
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -243,32 +243,24 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
/* the lowest lsb bits should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
-
err_addr &= ~((0x1ULL << addr_lsb) - 1);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
- uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
- uint64_t na_err_addr, retired_page_addr;
- uint32_t col = 0;
- int ret = 0;
-
- /* loop for all possibilities of [C6 C5] in normal address. */
- for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
- na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
- /* Mapping normal error address to retired soc physical address. */
- ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
- na_err_addr, &retired_page_addr);
- if (ret) {
- dev_err(adev->dev, "Failed to map pa from umc na.\n");
- break;
- }
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
- retired_page_addr);
- amdgpu_umc_fill_error_record(err_data, na_err_addr,
- retired_page_addr, channel_index, umc_inst);
+ na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+ /* loop for all possibilities of [C6 C5] in normal address. */
+ for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+ na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+ /* Mapping normal error address to retired soc physical address. */
+ ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+ na_err_addr, &retired_page_addr);
+ if (ret) {
+ dev_err(adev->dev, "Failed to map pa from umc na.\n");
+ break;
}
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+ retired_page_addr);
+ amdgpu_umc_fill_error_record(err_data, na_err_addr,
+ retired_page_addr, channel_index, umc_inst);
}
}
@@ -338,6 +330,31 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
}
}
+static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
+ struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+ ecc_ctrl_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
+ ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+ umc_reg_offset) * 4);
+
+ return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t umc_reg_offset = 0;
+
+	/* If fatal error reporting is enabled in UMC node0 instance0
+	 * channel0, the whole device is treated as running in fatal-error
+	 * (non-poison) mode
+	 */
+ umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
+ return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+}
+
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -348,4 +365,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.hw_ops = &umc_v8_10_ras_hw_ops,
},
.err_cnt_init = umc_v8_10_err_cnt_init,
+ .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
};
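For context, a hedged sketch of how a caller might consume the new hook (no dispatch site is part of this hunk; the helper name and fallback are assumptions): RAS code can use it to decide whether UMC uncorrectable errors are consumed as poison or must escalate.

/* Illustration only: query the per-ASIC poison mode if implemented. */
static bool umc_poison_mode_sketch(struct amdgpu_device *adev)
{
	struct amdgpu_umc_ras *ras = adev->umc.ras;

	if (ras && ras->query_ras_poison_mode)
		return ras->query_ras_poison_mode(adev);
	return false;	/* treat unknown as fatal-error mode */
}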
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index f35253e0eaa6..b717fdaa46e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -108,20 +108,35 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v8_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint64_t retired_page;
+ uint32_t channel_index;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+}
+
static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, retired_page;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -130,24 +145,15 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -324,14 +330,12 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
uint32_t umc_inst)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ uint64_t mc_umc_status, err_addr, mc_umc_addrt0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
-
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
@@ -343,10 +347,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -354,16 +357,8 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
err_addr &= ~((0x1ULL << lsb) - 1);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index c70c026c9a93..2797029bd500 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -223,7 +223,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- lock_page(page);
+ zone_device_page_init(page);
}
static void
@@ -410,7 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
dma_addr_t *scratch;
void *buf;
@@ -666,7 +666,7 @@ out_oom:
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -674,7 +674,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long cpages = 0;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -697,6 +697,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.src = buf;
migrate.dst = migrate.src + npages;
+ migrate.fault_page = fault_page;
scratch = (dma_addr_t *)(migrate.dst + npages);
kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
@@ -764,7 +765,7 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct amdgpu_device *adev;
struct vm_area_struct *vma;
@@ -805,7 +806,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
+ fault_page);
if (r < 0) {
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
@@ -849,7 +851,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
- r = svm_migrate_vram_to_ram(prange, mm, trigger);
+ r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -950,7 +952,8 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
}
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+ vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
r, prange->svms, prange, prange->start, prange->last);
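The new fault_page argument threads the CPU-faulting page down into migrate_vma. A minimal setup sketch, assuming the struct migrate_vma fault_page member introduced alongside this series (src_pfns/dst_pfns and the pgmap owner are illustrative): the faulting page is already locked by the fault handler, so core migration must not try to lock it again.

	struct migrate_vma migrate = { 0 };

	migrate.vma         = vma;
	migrate.start       = start;
	migrate.end         = end;
	migrate.src         = src_pfns;
	migrate.dst         = dst_pfns;
	migrate.flags       = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	migrate.pgmap_owner = adev;		/* illustrative owner */
	migrate.fault_page  = fault_page;	/* NULL on non-fault paths */

	if (migrate_vma_setup(&migrate))
		return -EFAULT;

This is why every non-fault caller below passes NULL while svm_migrate_to_ram() forwards vmf->page.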
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index b3f0754b32fa..a5d7e6d22264 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -43,7 +43,7 @@ enum MIGRATION_COPY_DIR {
int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm, uint32_t trigger);
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger);
+ uint32_t trigger, struct page *fault_page);
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 26b53b6d673e..4f6390f3236e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -333,7 +333,8 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index f5913ba22174..64fdf63093a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2913,13 +2913,15 @@ retry_write_locked:
*/
if (prange->actual_loc)
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
else
r = 0;
}
} else {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
@@ -3278,7 +3280,8 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
return 0;
if (!best_loc) {
- r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ r = svm_migrate_vram_to_ram(prange, mm,
+ KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
@@ -3339,7 +3342,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_TTM_EVICTION);
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
if (!r && prange->actual_loc)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4c73727e0b7d..c053cb79cd06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1110,7 +1110,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb[i] = &fb_info->fb[i];
switch (adev->ip_versions[DCE_HWIP][0]) {
- case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -7478,15 +7479,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- dm_set_vupdate_irq(new_state->base.crtc, true);
- drm_crtc_vblank_get(new_state->base.crtc);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- dm_set_vupdate_irq(new_state->base.crtc, false);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -8242,23 +8243,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
- /* Count number of newly disabled CRTCs for dropping PM refs later. */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (old_crtc_state->active && !new_crtc_state->active)
- crtc_disable_count++;
-
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
- /* For freesync config update on crtc state and params for irq */
- update_stream_irq_parameters(dm, dm_new_crtc_state);
-
- /* Handle vrr on->off / off->on transitions */
- amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
- dm_new_crtc_state);
- }
-
/**
* Enable interrupts for CRTCs that are newly enabled or went through
* a modeset. It was intentionally deferred until after the front end
@@ -8268,16 +8252,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
- bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+ struct crc_rd_work *crc_rd_wrk;
+#endif
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ crc_rd_wrk = dm->crc_rd_wrk;
#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (new_crtc_state->active &&
(!old_crtc_state->active ||
@@ -8285,16 +8282,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
- configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
@@ -8306,14 +8306,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
- }
-
- if (configure_crc)
if (amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src))
DRM_DEBUG_DRIVER("Failed to configure crc source");
-#endif
+ }
}
+#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -9392,10 +9390,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
- if (!pre_validate_dsc(state, &dm_state, vars)) {
- ret = -EINVAL;
- goto fail;
- }
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -9529,6 +9523,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ if (!pre_validate_dsc(state, &dm_state, vars)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+#endif
+
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 8ca10ab3dfc1..26291db0a3cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -60,11 +60,15 @@ static bool link_supports_psrsu(struct dc_link *link)
*/
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
- if (link->type == dc_connection_none)
+ if (link->type == dc_connection_none) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
if (link->dpcd_caps.psr_info.psr_version == 0) {
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 53b077b40d72..ee0456b5e14e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -51,13 +51,6 @@
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
-struct i2c_id_config_access {
- uint8_t bfI2C_LineMux:4;
- uint8_t bfHW_EngineID:3;
- uint8_t bfHW_Capable:1;
- uint8_t ucAccess;
-};
-
static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
struct atom_i2c_record *record,
struct graphics_object_i2c_info *info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d30d1d9d67e..650f3b4b562e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -179,7 +179,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, 126);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -206,7 +206,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
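For readers unfamiliar with the DC register helpers: REG_WAIT polls a register field until it reaches the given value, waiting a fixed delay between polls for a bounded number of attempts (this parameter reading is an assumption from the dm_services helpers). The change therefore widens the worst-case wait for the divider-change handshake without altering the poll cadence:

/* Poll DENTIST_DISPCLK_CHG_DONE until it reads 1, 50 us between polls,
 * for at most 2000 attempts (assumed REG_WAIT semantics). */
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);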
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 897105d1c111..ef0795b14a1f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -339,29 +339,24 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
if (!clk_mgr->smu_present)
return;
- if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
- (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
- support = DCN_ZSTATE_SUPPORT_DISALLOW;
-
-
// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
// Arg[16] = Disallow Z9 -> new bit
switch (support) {
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 9;
+ param = (1 << 10) | (1 << 9) | (1 << 8);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 8;
+ param = 0;
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 0x00010008;
+ param = (1 << 10);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
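The resulting Arg encodings, tabulated from this hunk (bit meanings follow the Arg[] comment above; the send call shown is the file's existing message path and otherwise an assumption):

/* DCN_ZSTATE_SUPPORT_ALLOW          -> (1 << 10)|(1 << 9)|(1 << 8) = 0x700
 * DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY -> (1 << 10)                   = 0x400
 * DCN_ZSTATE_SUPPORT_DISALLOW       -> 0x000
 * One explicit enable bit per z-state replaces the old magic values
 * 9, 8 and 0x00010008. */
dcn314_smu_send_msg_with_param(clk_mgr, msg_id, param);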
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index f0f3f66629cc..1c612ccf1944 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,7 +156,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
- unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
@@ -180,27 +180,28 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
/* DCFCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
- &num_levels);
- num_dcfclk_levels = num_levels;
+ &num_entries_per_clk->num_dcfclk_levels);
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_socclk_levels);
+
/* DTBCLK */
if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
- &num_levels);
- num_dtbclk_levels = num_levels;
+ &num_entries_per_clk->num_dtbclk_levels);
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
- &num_levels);
- num_dispclk_levels = num_levels;
+ &num_entries_per_clk->num_dispclk_levels);
+ num_levels = num_entries_per_clk->num_dispclk_levels;
- if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+ if (num_entries_per_clk->num_dcfclk_levels &&
+ num_entries_per_clk->num_dtbclk_levels &&
+ num_entries_per_clk->num_dispclk_levels)
clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
@@ -333,6 +334,21 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
+ clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
+
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+
+ if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+ clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+
+ /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support) {
+			/* Tell PMFW that FCLK P-state change is supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
+ }
+ }
+
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
@@ -352,7 +368,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
@@ -361,27 +376,25 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
- clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+	/* Always update the saved value, even if the new value was not applied because P-State switching is unsupported. Also check safe_to_lower for FCLK. */
+ if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
+ update_fclk = true;
+ }
- /* To disable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
- }
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+		/* Tell PMFW that FCLK P-state change is not supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -390,21 +403,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
update_uclk = true;
}
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
- }
-
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
@@ -632,7 +635,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
@@ -648,22 +651,34 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
/* Get current memclk states, update bounding box */
static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
unsigned int num_levels;
if (!clk_mgr->smu_present)
return;
- /* Refresh memclk states */
+ /* Refresh memclk and fclk states */
dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_memclk_levels);
+
+ dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+ &num_entries_per_clk->num_fclk_levels);
+
+ if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
+ num_levels = num_entries_per_clk->num_memclk_levels;
+ } else {
+ num_levels = num_entries_per_clk->num_fclk_levels;
+ }
+
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
if (clk_mgr->dpm_present && !num_levels)
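The refactor replaces the single shared num_levels with per-clock counts. A reconstruction of the assumed struct shape, inferred purely from the accesses in this hunk (the real definition lives in the clk_mgr headers):

struct clk_limit_num_entries {
	uint32_t num_dcfclk_levels;
	uint32_t num_fclk_levels;
	uint32_t num_memclk_levels;
	uint32_t num_socclk_levels;
	uint32_t num_dtbclk_levels;
	uint32_t num_dispclk_levels;
};

Keeping MEMCLK and FCLK counts separate is what lets the UCLK hard-min calls above index num_memclk_levels instead of the formerly shared num_entries.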
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 258ba5a872b1..997ab031f816 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1734,10 +1734,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
struct dc_state *old_state;
+ bool subvp_prev_use = false;
dc_z10_restore(dc);
dc_allow_idle_optimizations(dc, false);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* Check old context for SubVP */
+ subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1777,6 +1787,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) {
@@ -1794,6 +1807,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
+
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2927,6 +2946,12 @@ static bool update_planes_and_stream_state(struct dc *dc,
dc_resource_state_copy_construct(
dc->current_state, context);
+	/* For each full update, remove all existing phantom pipes first.
+	 * This ensures that there are enough pipes for newly added MPO planes.
+	 */
+ if (dc->res_pool->funcs->remove_phantom_pipes)
+ dc->res_pool->funcs->remove_phantom_pipes(dc, context);
+
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
@@ -3334,8 +3359,14 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ }
+
return;
}
@@ -3495,6 +3526,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
@@ -3542,6 +3576,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
bool force_minimal_pipe_splitting = false;
+ uint32_t i;
*is_plane_addition = false;
@@ -3573,6 +3608,36 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
}
}
+	/* For the SubVP pipe-split case, adding an MPO video plane
+	 * requires a minimal transition. In this case there will be
+	 * 2 streams (1 main stream, 1 phantom stream).
+	 */
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 2 &&
+ stream->mall_stream_config.type == SUBVP_MAIN) {
+ bool is_pipe_split = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+ (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+ dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+ is_pipe_split = true;
+ break;
+ }
+ }
+
+		/* determine if minimal transition is required due to SubVP */
+ if (surface_count > 0 && is_pipe_split) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
return force_minimal_pipe_splitting;
}
@@ -3582,6 +3647,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_context = dc_create_state(dc);
enum pipe_split_policy tmp_mpc_policy;
bool temp_dynamic_odm_policy;
+ bool temp_subvp_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
@@ -3596,6 +3662,9 @@ static bool commit_minimal_transition_state(struct dc *dc,
temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
dc->debug.enable_single_display_2to1_odm_policy = false;
+ temp_subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+
dc_resource_state_copy_construct(transition_base_context, transition_context);
//commit minimal state
@@ -3624,6 +3693,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
dc->debug.pipe_split_policy = tmp_mpc_policy;
dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
+ dc->debug.force_disable_subvp = temp_subvp_policy;
if (ret != DC_OK) {
/*this should never happen*/
@@ -4587,6 +4657,37 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
+ * dc_process_dmub_dpia_hpd_int_enable - submit DPIA HPD interrupt enable
+ * command to DMUB via an inbox message
+ * @dc: dc structure
+ * @hpd_int_enable: 1 to enable the HPD interrupt, 0 to disable it
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
+/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
*/
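A hypothetical call site for the new export (illustration only; no caller is part of this hunk):

/* Re-arm DPIA HPD interrupts, e.g. after link teardown or resume. */
dc_process_dmub_dpia_hpd_int_enable(dc, 1);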
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 3d19fb92333b..d7b1ace6328a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1307,7 +1307,10 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- // Init dc_panel_config
+ /* Init dc_panel_config by HW config */
+ if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+ dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+ /* Pickup base DM settings */
dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
// Override dc_panel_config if system has specific settings
dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
@@ -3143,7 +3146,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (allow_active && link->type == dc_connection_none) {
+	if (allow_active && *allow_active && link->type == dc_connection_none) {
// Don't enter PSR if panel is not connected
return false;
}
@@ -3375,8 +3378,8 @@ bool dc_link_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
- if(!dc->debug.disable_z10)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
+ if (dc->debug.disable_z10)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
default:
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index c57df45e83ff..1254d38f1778 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -944,6 +944,23 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
return status;
}
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum dc_status status = core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ sizeof(link_training_setting->dpcd_lane_settings));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ return status;
+}
+
+
enum dc_status dpcd_set_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
@@ -964,16 +981,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->link_settings.lane_count);
if (is_repeater(link_training_setting, offset)) {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- offset,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
@@ -985,14 +992,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
@@ -2023,7 +2022,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
result = DP_128b_132b_LT_FAILED;
} else {
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dpcd_set_lane_settings(link, lt_settings, DPRX);
+ dpcd_128b_132b_set_lane_settings(link, lt_settings);
}
loop_count++;
}
@@ -5090,6 +5089,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
(dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
ASSERT(0);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
@@ -5098,6 +5098,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
if (is_lttpr_present)
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
return is_lttpr_present;
}
@@ -5134,6 +5135,7 @@ void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
} else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
*override = LTTPR_MODE_NON_LTTPR;
}
+ DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
}
enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
@@ -5146,22 +5148,34 @@ enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
return LTTPR_MODE_NON_LTTPR;
if (vbios_lttpr_aware) {
- if (vbios_lttpr_force_non_transparent)
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
return LTTPR_MODE_NON_TRANSPARENT;
- else
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
return LTTPR_MODE_TRANSPARENT;
+ }
}
if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- link->dc->caps.extended_aux_timeout_support)
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
return LTTPR_MODE_NON_TRANSPARENT;
+ }
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
return LTTPR_MODE_NON_LTTPR;
}
enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
{
- return dp_is_lttpr_present(link) ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
+ enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+ if (dp_is_lttpr_present(link))
+ mode = LTTPR_MODE_NON_TRANSPARENT;
+
+ DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+ return mode;
}
static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
@@ -5179,9 +5193,10 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
- cmd.cable_id.header.ret_status == 1)
+ cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
-
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+ }
return cmd.cable_id.header.ret_status == 1;
}
@@ -5228,6 +5243,7 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
lttpr_present = dp_is_lttpr_present(link) ||
(!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
+ DC_LOG_DC("lttpr_present = %d.\n", lttpr_present ? 1 : 0);
/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
* be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
@@ -5795,7 +5811,7 @@ void detect_edp_sink_caps(struct dc_link *link)
* Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- (link->dc->debug.optimize_edp_link_rate ||
+ (link->panel_config.ilr.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -6744,7 +6760,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
ASSERT(link || crtc_timing); // invalid input
if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- !link->dc->debug.optimize_edp_link_rate)
+ !link->panel_config.ilr.optimize_edp_link_rate)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 8ee0d946bb2f..fd8db482e56f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1747,7 +1747,6 @@ bool dc_remove_plane_from_context(
for (i = 0; i < stream_status->plane_count; i++) {
if (stream_status->plane_states[i] == plane_state) {
-
dc_plane_state_release(stream_status->plane_states[i]);
break;
}
@@ -3683,4 +3682,56 @@ bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
(stream->timing.h_sync_width % 2 == 0);
}
return divisible;
+}
+
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev;
+ const struct resource_pool *pool = dc->res_pool;
+
+ sec_top = sec_pipe->top_pipe;
+ sec_bottom = sec_pipe->bottom_pipe;
+ sec_next = sec_pipe->next_odm_pipe;
+ sec_prev = sec_pipe->prev_odm_pipe;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->top_pipe = sec_top;
+ sec_pipe->bottom_pipe = sec_bottom;
+ sec_pipe->next_odm_pipe = sec_next;
+ sec_pipe->prev_odm_pipe = sec_prev;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+#endif
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
+#endif
+ }
+
+ return true;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index ae13887756bf..38d71b5c1f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -276,6 +276,8 @@ static void program_cursor_attributes(
}
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
@@ -382,6 +384,8 @@ static void program_cursor_position(
}
dc->hwss.set_cursor_position(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
if (pipe_to_program)
@@ -520,9 +524,9 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
/* remove writeback info for disabled writeback pipes from stream */
- for (i = 0, j = 0; i < stream->num_wb_info && j < MAX_DWB_PIPES; i++) {
+ for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (i != j)
+ if (j < i)
/* trim the array */
stream->writeback_info[j] = stream->writeback_info[i];
j++;
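The rewritten trim loop is a stable in-place compaction: j trails i, so an enabled entry is copied only after at least one disabled slot has been skipped (j < i), and j can never exceed i, which is why the old j < MAX_DWB_PIPES bound was redundant. A standalone sketch of the same idea (hypothetical helper, for illustration):

	/* Keep enabled writeback entries, preserving order; returns the new count. */
	static int trim_enabled(struct dc_writeback_info *info, int count)
	{
		int i, j = 0;

		for (i = 0; i < count; i++) {
			if (info[i].wb_enabled) {
				if (j < i)
					info[j] = info[i]; /* close the gap */
				j++;
			}
		}
		return j;
	}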
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 2ecf36e6329b..bfc5474c0f4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.205"
+#define DC_VER "3.2.207"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -821,7 +821,6 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
- bool optimize_edp_link_rate; /* eDP ILR */
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
@@ -1192,6 +1191,8 @@ struct dc_plane_state {
enum dc_irq_source irq_source;
struct kref refcount;
struct tg_color visual_confirm_color;
+
+ bool is_statically_allocated;
};
struct dc_plane_info {
@@ -1611,6 +1612,9 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 89d7d3fd3321..0541e87e4f38 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -30,6 +30,7 @@
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
+#include "cursor_reg_cache.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -780,7 +781,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// Store the original watermark value for this SubVP config so we can lower it when the
// MCLK switch starts
wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
- dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 / 1000;
+ (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
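Re-parenthesizing the conversion matters if the intermediate product is evaluated in 32 bits: dividing the refclk down to MHz before multiplying keeps the product in range. A worked example with illustrative numbers (hypothetical helper):

	/* ns -> refclk ticks without a 32-bit intermediate overflow.
	 * For refclk_khz = 400000 and ns = 20000:
	 *   ns * refclk_khz          = 8,000,000,000 (> UINT32_MAX, overflows)
	 *   ns * (refclk_khz / 1000) = 8,000,000, then / 1000 = 8000 ticks
	 */
	static uint32_t ns_to_refclk_ticks(uint32_t ns, uint32_t refclk_khz)
	{
		return ns * (refclk_khz / 1000) / 1000;
	}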
@@ -880,3 +881,147 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
diag_data.is_cw0_enabled,
diag_data.is_cw6_enabled);
}
+
+static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state != NULL) {
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ return false;
+ }
+
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
+ pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
+ return true;
+
+ return false;
+}
+
+static void dc_build_cursor_update_payload0(
+ struct pipe_ctx *pipe_ctx, uint8_t p_idx,
+ struct dmub_cmd_update_cursor_payload0 *payload)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ unsigned int panel_inst = 0;
+
+ if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
+ pipe_ctx->stream->link, &panel_inst))
+ return;
+
+ /* Payload: Cursor Rect is built from position & attribute
+ * x & y are obtained from position
+ */
+ payload->cursor_rect.x = hubp->cur_rect.x;
+ payload->cursor_rect.y = hubp->cur_rect.y;
+ /* w & h are obtained from attribute */
+ payload->cursor_rect.width = hubp->cur_rect.w;
+ payload->cursor_rect.height = hubp->cur_rect.h;
+
+ payload->enable = hubp->pos.cur_ctl.bits.cur_enable;
+ payload->pipe_idx = p_idx;
+ payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ payload->panel_inst = panel_inst;
+}
+
+static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
+ union dmub_rb_cmd *cmd)
+{
+ dc_dmub_srv_cmd_queue(dmub_srv, cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+}
+
+static void dc_build_cursor_position_update_payload0(
+ struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw;
+ pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
+ pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
+ pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;
+
+ /* dpp */
+ pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
+ pl->position_cfg.pipe_idx = p_idx;
+}
+
+static void dc_build_cursor_attribute_update_payload1(
+ struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
+ pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw;
+ pl_A->aHubp.size.raw = hubp->att.size.raw;
+ pl_A->aHubp.settings.raw = hubp->att.settings.raw;
+
+ /* dpp */
+ pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
+}
+
+/**
+ * ***************************************************************************************
+ * dc_send_update_cursor_info_to_dmu: Populate the DMCUB cursor update info command
+ *
+ * This function stores the cursor-related information and passes it to the DMUB.
+ *
+ * @param [in] pCtx: pipe context
+ * @param [in] pipe_idx: pipe index
+ *
+ * @return: void
+ *
+ * ***************************************************************************************
+ */
+
+void dc_send_update_cursor_info_to_dmu(
+ struct pipe_ctx *pCtx, uint8_t pipe_idx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ union dmub_cmd_update_cursor_info_data *update_cursor_info =
+ &cmd.update_cursor_info.update_cursor_info_data;
+
+ if (!dc_dmub_should_update_cursor_data(pCtx))
+ return;
+ /*
+ * Since we use multi_cmd_pending for the dmub command, the 2nd command
+ * is assigned solely to store the cursor attributes info.
+ * The 1st command can be viewed as 2 parts: one for the PSR/Replay data,
+ * the other to store the cursor position info.
+ *
+ * The command header type must be the same when using multi_cmd_pending.
+ * Besides, while the DMU processes the 2nd command, the sub type is
+ * unused, so it is meaningless to pass a sub type header with a
+ * different type.
+ */
+
+ {
+ /* Build Payload#0 Header */
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes =
+ sizeof(cmd.update_cursor_info.update_cursor_info_data);
+ cmd.update_cursor_info.header.multi_cmd_pending = 1; /* 1st cmd of the combined dmub sequence */
+
+ /* Prepare Payload */
+ dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+
+ dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+ pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+ /* Send update_cursor_info to queue */
+ dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+ {
+ /* Build Payload#1 Header */
+ memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+ cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+
+ dc_build_cursor_attribute_update_payload1(
+ &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+ pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+
+ /* Send the 2nd, combined update_cursor_info cmd to the DMU */
+ dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+}
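The queue/execute/wait split above generalizes to any number of chained commands: every command except the last is queued with multi_cmd_pending = 1, and only the final one (flag cleared) triggers execution. A minimal sketch under that assumption (send_chained is a hypothetical helper; cmd_common is the generic header view of the command union):

	static void send_chained(struct dc_dmub_srv *srv,
				 union dmub_rb_cmd *cmds, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			/* All but the last command stay pending. */
			cmds[i].cmd_common.header.multi_cmd_pending = (i + 1 < n);
			dc_dmub_srv_cmd_queue(srv, &cmds[i]);
		}
		dc_dmub_srv_cmd_execute(srv);
		dc_dmub_srv_wait_idle(srv);
	}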
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 7e438345b1a8..d34f5563df2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -88,4 +88,5 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
+void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index bf5f9e2773bc..caf0c7af2d0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -138,6 +138,10 @@ struct dc_panel_config {
bool disable_dsc_edp;
unsigned int force_dsc_edp_policy;
} dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
};
/*
* A link contains one or more sinks and their connected status.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 32782ef9ef77..140297c8ff55 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -942,10 +942,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_RET_ERROR_ENGINE_ACQUIRE:
case AUX_RET_ERROR_UNKNOWN:
default:
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: Failure: operation_result=%d",
- (int)operation_result);
goto fail;
}
}
@@ -953,14 +949,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
fail:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
- "dce_aux_transfer_with_retries: FAILURE");
+ "%s: Failure: operation_result=%d",
+ __func__,
+ (int)operation_result);
if (!payload_reply)
payload->reply = NULL;
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
- WPP_BIT_FLAG_DC_ERROR,
- "AUX transaction failed. Result: %d",
- operation_result);
-
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 897f412f539e..b9765b3899e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -469,6 +469,7 @@ void dpp1_set_cursor_position(
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, cur_en);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 72521749c01d..11e4c4e46947 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2244,6 +2244,9 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -2254,13 +2257,21 @@ void dcn10_enable_timing_synchronization(
for (i = 0; i < group_size; i++) {
if (grouped_pipes[i]->stream == NULL)
continue;
+
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream->vblank_synchronized = false;
}
- for (i = 1; i < group_size; i++)
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
grouped_pipes[i]->stream_res.tg,
grouped_pipes[0]->stream_res.tg->inst);
+ }
DC_SYNC_INFO("Waiting for trigger\n");
@@ -2268,12 +2279,21 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
- for (i = 1; i < group_size; i++)
+ if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
grouped_pipes[i]->stream_res.tg);
+ }
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -3005,6 +3025,7 @@ void dcn10_prepare_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3027,8 +3048,11 @@ void dcn10_prepare_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3041,6 +3065,7 @@ void dcn10_optimize_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3064,8 +3089,11 @@ void dcn10_optimize_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3344,127 +3372,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool dcn10_dmub_should_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct dc_debug_options *debug)
-{
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dcn10_can_pipe_disable_cursor(pipe_ctx))
- return false;
-
- if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
- return true;
-
- return false;
-}
-
-static void dcn10_dmub_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct hubp *hubp,
- const struct dc_cursor_mi_param *param,
- const struct dc_cursor_position *cur_pos,
- const struct dc_cursor_attributes *cur_attr)
-{
- union dmub_rb_cmd cmd;
- struct dmub_cmd_update_cursor_info_data *update_cursor_info;
- const struct dc_cursor_position *pos;
- const struct dc_cursor_attributes *attr;
- int src_x_offset = 0;
- int src_y_offset = 0;
- int x_hotspot = 0;
- int cursor_height = 0;
- int cursor_width = 0;
- uint32_t cur_en = 0;
- unsigned int panel_inst = 0;
-
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
-
- if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
- return;
- /**
- * if cur_pos == NULL means the caller is from cursor_set_attribute
- * then driver use previous cursor position data
- * if cur_attr == NULL means the caller is from cursor_set_position
- * then driver use previous cursor attribute
- * if cur_pos or cur_attr is not NULL then update it
- */
- if (cur_pos != NULL)
- pos = cur_pos;
- else
- pos = &hubp->curs_pos;
-
- if (cur_attr != NULL)
- attr = cur_attr;
- else
- attr = &hubp->curs_attr;
-
- if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))
- return;
-
- src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
- x_hotspot = pos->x_hotspot;
- cursor_height = (int)attr->height;
- cursor_width = (int)attr->width;
- cur_en = pos->enable ? 1:0;
-
- // Rotated cursor width/height and hotspots tweaks for offset calculation
- if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(cursor_height, cursor_width);
- if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
- }
- } else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
-
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
- }
-
- if (src_x_offset >= (int)param->viewport.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (src_x_offset + cursor_width <= 0)
- cur_en = 0; /* not visible beyond left edge*/
-
- if (src_y_offset >= (int)param->viewport.height)
- cur_en = 0; /* not visible beyond bottom edge*/
-
- if (src_y_offset + cursor_height <= 0)
- cur_en = 0; /* not visible beyond top edge*/
-
- // Cursor bitmaps have different hotspot values
- // There's a possibility that the above logic returns a negative value, so we clamp them to 0
- if (src_x_offset < 0)
- src_x_offset = 0;
- if (src_y_offset < 0)
- src_y_offset = 0;
-
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes =
- sizeof(cmd.update_cursor_info.update_cursor_info_data);
- update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
- update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
- update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
- update_cursor_info->cursor_rect.width = attr->width;
- update_cursor_info->cursor_rect.height = attr->height;
- update_cursor_info->enable = cur_en;
- update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
- update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
- update_cursor_info->panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3699,7 +3606,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
- dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
@@ -3707,25 +3613,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
- struct dc_cursor_mi_param param = { 0 };
-
- /**
- * If enter PSR without cursor attribute update
- * the cursor attribute of dmub_restore_plane
- * are initial value. call dmub to exit PSR and
- * restore plane then update cursor attribute to
- * avoid override with initial value
- */
- if (pipe_ctx->plane_state != NULL) {
- param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
- param.viewport = pipe_ctx->plane_res.scl_data.viewport;
- param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
- param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
- param.rotation = pipe_ctx->plane_state->rotation;
- param.mirror = pipe_ctx->plane_state->horizontal_mirror;
- dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);
- }
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
@@ -3810,28 +3697,14 @@ void dcn10_calc_vupdate_position(
uint32_t *start_line,
uint32_t *end_line)
{
- const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
- int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt.lines_offset;
- int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- int start_position;
-
- if (vline_int_offset_from_vupdate > 0)
- vline_int_offset_from_vupdate--;
- else if (vline_int_offset_from_vupdate < 0)
- vline_int_offset_from_vupdate++;
-
- start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- if (start_position >= 0)
- *start_line = start_position;
+ if (vupdate_pos >= 0)
+ *start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
else
- *start_line = dc_crtc_timing->v_total + start_position - 1;
-
- *end_line = *start_line + 2;
-
- if (*end_line >= dc_crtc_timing->v_total)
- *end_line = 2;
+ *start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
}
static void dcn10_cal_vline_position(
@@ -3840,23 +3713,27 @@ static void dcn10_cal_vline_position(
uint32_t *start_line,
uint32_t *end_line)
{
- switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
- case START_V_UPDATE:
- dcn10_calc_vupdate_position(
- dc,
- pipe_ctx,
- start_line,
- end_line);
- break;
- case START_V_SYNC:
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
+
+ if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
+ if (vline_pos > 0)
+ vline_pos--;
+ else if (vline_pos < 0)
+ vline_pos++;
+
+ vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+ if (vline_pos >= 0)
+ *start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
+ else
+ *start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
// vsync is line 0 so start_line is just the requested line offset
- *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
- *end_line = *start_line + 2;
- break;
- default:
+ *start_line = vline_pos;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else
ASSERT(0);
- break;
- }
}
void dcn10_setup_periodic_interrupt(
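Both rewritten helpers reduce a signed line position into [0, v_total) with the same arithmetic; a worked sketch with illustrative numbers (hypothetical standalone function mirroring the expressions above):

	/* For v_total = 1000:
	 *   pos = 1203 -> 1203 - (1203 / 1000) * 1000      = 203
	 *   pos =   -5 -> -5 + ((5 / 1000) + 1) * 1000 - 1 = 994
	 * The extra -1 on the negative branch preserves the previous
	 * "v_total + pos - 1" wrap convention.
	 */
	static uint32_t wrap_line(int pos, int v_total)
	{
		if (pos >= 0)
			return pos - (pos / v_total) * v_total; /* pos % v_total */
		return pos + ((-pos / v_total) + 1) * v_total - 1;
	}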
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index ea7739255119..33d780218790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -207,10 +207,7 @@ void optc1_program_timing(
/* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
* OTG_V_TOTAL_MIN are equal to V_TOTAL.
*/
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, v_total);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, v_total);
+ optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
/* v_sync_start = 0, v_sync_end = v_sync_width */
v_sync_end = patched_crtc_timing.v_sync_width;
@@ -649,13 +646,6 @@ uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
void optc1_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t regval = 0;
-
- regval = REG_READ(OTG_CONTROL);
-
- /* otg is not running, do not need to be locked */
- if ((regval & 0x1) == 0x0)
- return;
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
@@ -663,12 +653,10 @@ void optc1_lock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 1);
/* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) {
-
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
- }
}
void optc1_unlock(struct timing_generator *optc)
@@ -679,16 +667,6 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
-bool optc1_is_locked(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t locked;
-
- REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
-
- return (locked == 1);
-}
-
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -941,11 +919,7 @@ void optc1_set_drr(
}
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
-
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
@@ -964,11 +938,7 @@ void optc1_set_drr(
OTG_V_TOTAL_MAX_SEL, 0,
OTG_FORCE_LOCK_ON_EVENT, 0);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, 0);
-
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, 0);
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
}
@@ -1583,11 +1553,11 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = NULL,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.set_test_pattern = optc1_set_test_pattern,
.program_stereo = optc1_program_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 6323ca6dc3b3..88ac5f6f4c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -654,7 +654,6 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
-bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 831080b9eb87..56d30baf12df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1336,6 +1336,21 @@ static noinline void dcn10_resource_construct_fp(
}
}
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+ int i;
+
+ if (clks->num_levels == 0)
+ return false;
+
+ for (i = 0; i < clks->num_levels; i++)
+ /* Ensure that the result is sane */
+ if (clks->data[i].clocks_in_khz == 0)
+ return false;
+
+ return true;
+}
+
static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@@ -1345,6 +1360,9 @@ static bool dcn10_resource_construct(
int j;
struct dc_context *ctx = dc->ctx;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
+ bool res;
ctx->dc_bios->regs = &bios_regs;
@@ -1523,15 +1541,53 @@ static bool dcn10_resource_construct(
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
- DC_FP_START();
- if (!dc->debug.disable_pplib_clock_request)
- dcn_bw_update_from_pplib(dc);
+
+ if (!dc->debug.disable_pplib_clock_request) {
+ /*
+ * TODO: This is not the proper way to obtain
+ * fabric_and_dram_bandwidth, should be min(fclk, memclk).
+ */
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&fclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_fclks(dc, &fclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&dcfclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+ }
+
dcn_bw_sync_calcs_and_dml(dc);
if (!dc->debug.disable_pplib_wm_range) {
dc->res_pool = &pool->base;
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_START();
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
+ DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
- DC_FP_END();
{
struct irq_service_init_data init_data;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b1ec0e6f7f58..4996d2810edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -617,6 +617,17 @@ void hubp2_cursor_set_attributes(
CURSOR0_DST_Y_OFFSET, 0,
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
}
void hubp2_dmdata_set_attributes(
@@ -1033,6 +1044,25 @@ void hubp2_cursor_set_position(
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
+ /* Cursor Position Register Config */
+ hubp->pos.cur_ctl.bits.cur_enable = cur_en;
+ hubp->pos.position.bits.x_pos = pos->x;
+ hubp->pos.position.bits.y_pos = pos->y;
+ hubp->pos.hot_spot.bits.x_hot = x_hotspot;
+ hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+ hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
+ /* Cursor Rectangle Cache
+ * Cursor bitmaps have different hotspot values;
+ * the logic above can produce negative offsets,
+ * so clamp them to 0.
+ */
+ if (src_x_offset < 0)
+ src_x_offset = 0;
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+ /* Save the necessary cursor position info (x, y); w and h are saved in the attribute func. */
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e1d271fe9e64..d732b6f031a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1862,24 +1862,6 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpcc_pipe;
-
- if (pipe->vtp_locked) {
- dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp);
- pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true);
- pipe->vtp_locked = false;
-
- for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* If an active, non-phantom pipe is being transitioned into a phantom
@@ -2018,6 +2000,10 @@ void dcn20_optimize_bandwidth(
context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ /* increase compbuf size */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2033,9 +2019,6 @@ void dcn20_optimize_bandwidth(
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
}
}
- /* increase compbuf size */
- if (hubbub->funcs->program_compbuf_size)
- hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
}
bool dcn20_update_bandwidth(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 0340fdd3f5fb..a08c335b7383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -529,6 +529,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5752271f22df..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,15 +67,9 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active, prefetch_done;
+ uint32_t riommu_active;
int i;
- REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
-
- if (prefetch_done) {
- hubbub->riommu_active = true;
- return;
- }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 7cb35bb1c0f1..887081472c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -657,7 +657,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .optimize_edp_link_rate = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -677,6 +676,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
@@ -1367,6 +1372,11 @@ static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
+static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
#define CTX ctx
#define REG(reg_name) \
@@ -1408,6 +1418,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
+ .get_panel_config_defaults = dcn21_get_panel_config_defaults,
};
static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 4a668d6563df..e5b7ef7422b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -372,6 +372,10 @@ void dpp3_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
+
+ dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
+ dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en;
+ dpp_base->att.cur0_ctl.bits.mode = color_format;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 1782b9c26cf4..892d3c4d01a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -319,13 +319,13 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
@@ -366,4 +366,3 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 3a3b2ac791c7..020f512e9690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1655,6 +1655,9 @@ noinline bool dcn30_internal_validate_bw(
if (!pipes)
return false;
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ context->bw_ctx.dml.vba.VoltageLevel = 0;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
@@ -1873,6 +1876,7 @@ noinline bool dcn30_internal_validate_bw(
if (repopulate_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 559e563d5bc1..f04595b750ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -852,7 +852,7 @@ static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
vmid->masks = &vmid_masks;
}
- hubbub3->num_vmid = res_cap_dcn301.num_vmid;
+ hubbub3->num_vmid = res_cap_dcn301.num_vmid;
return &hubbub3->base;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 52fb2bf3d578..814f401db3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -197,7 +197,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
uint32_t h_back_porch;
uint32_t h_width;
uint32_t v_height;
- unsigned long long v_freq;
+ uint64_t v_freq;
uint8_t misc0 = 0;
uint8_t misc1 = 0;
uint8_t hsp;
@@ -360,7 +360,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
- v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+ v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100;
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
*
@@ -436,32 +436,28 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
{
struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
uint32_t dmdata_packet_enabled = 0;
- bool sdp_stream_enable = false;
- if (info_frame->vsc.valid) {
+ if (info_frame->vsc.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
0, /* packetIndex */
&info_frame->vsc,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->spd.valid) {
+
+ if (info_frame->spd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
2, /* packetIndex */
&info_frame->spd,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->hdrsmd.valid) {
+
+ if (info_frame->hdrsmd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
3, /* packetIndex */
&info_frame->hdrsmd,
true);
- sdp_stream_enable = true;
- }
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 2f7404a97479..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -201,7 +201,6 @@ void optc31_set_drr(
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
-
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
@@ -260,7 +259,6 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8c1a6fb36306..fddc21a5a04c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -888,9 +888,8 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .optimize_edp_link_rate = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
+ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -911,6 +910,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1803,6 +1808,11 @@ validate_out:
return out;
}
+static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1829,6 +1839,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn31_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 0d2ffb692957..7e773bf7b895 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -262,7 +262,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
-void enc314_stream_encoder_dp_blank(
+static void enc314_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 24ec71cbd3e3..d0ad72caead2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -881,7 +881,8 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_z10 = true, /*hw not support it*/
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
@@ -914,7 +915,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.seamless_boot_odm_combine = true
};
@@ -936,6 +936,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1675,6 +1681,11 @@ static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_END();
}
+static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1697,6 +1708,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn314_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index eebb42c9ddd6..58746c437554 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.psr_power_use_phy_fsm = 0,
};
@@ -907,6 +906,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1708,6 +1713,11 @@ static int dcn315_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1721,7 +1731,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
- .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1734,6 +1744,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn315_get_panel_config_defaults,
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index f4b52a35ad84..6b40a11ac83a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -906,6 +905,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1710,6 +1715,11 @@ static int dcn316_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1736,6 +1746,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn316_get_panel_config_defaults,
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index fdae6aa89908..076969d928af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -150,12 +150,6 @@ static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
}
-void enc32_set_dig_output_mode(struct link_encoder *enc, uint8_t pix_per_container)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container);
-}
-
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -186,7 +180,6 @@ static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn32_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 749a1e8cb811..bbcfce06bec0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,8 +53,4 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
-void enc32_set_dig_output_mode(
- struct link_encoder *enc,
- uint8_t pix_per_container);
-
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 0e9dce414641..d19fc93dbc75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -243,6 +243,39 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
+{
+ /* math borrowed from the function of the same name in inc/resource;
+ * checks whether h_timing is divisible by 2
+ */
+
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (timing) {
+ h_blank_start = timing->h_total - timing->h_front_porch;
+ h_blank_end = h_blank_start - timing->h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (timing->h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (timing->h_sync_width % 2 == 0);
+ }
+ return divisible;
+}
+
+static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
+{
+ /* should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy for DP encoders */
+ return is_h_timing_divisible_by_2(timing) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy;
+}
+
static void enc32_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -259,7 +292,7 @@ static void enc32_stream_encoder_dp_unblank(
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
/* this logic should be the same as in get_pixel_clock_parameters() */
n_multiply = 1;
}
@@ -355,7 +388,7 @@ static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode);
+ REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}
/* this function reads DSC-related register fields to be logged later in dcn10_log_hw_state
@@ -378,24 +411,6 @@ static void enc32_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
-static void enc32_stream_encoder_reset_fifo(struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t fifo_enabled;
-
- REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &fifo_enabled);
-
- if (fifo_enabled == 0) {
- /* reset DIG resync FIFO */
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
- /* TODO: fix timeout when wait for DIG_FIFO_RESET_DONE */
- //REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 1, 100);
- udelay(1);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 1, 100);
- }
-}
-
static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -425,8 +440,6 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc32_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
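For readers tracing the new gating above, here is a concrete timing run through is_h_timing_divisible_by_2() as a sketch; it is not part of the patch, the field names follow the struct usage in the hunk, and the values are illustrative only.

/* Illustrative timing checked by is_h_timing_divisible_by_2(). */
struct dc_crtc_timing t = {
        .h_total = 2200,       /* even */
        .h_front_porch = 88,   /* h_blank_start = 2200 - 88 = 2112, even */
        .h_addressable = 1920, /* h_blank_end = 2112 - 1920 = 192, even */
        .h_sync_width = 44,    /* even */
};
/* All four derived values are even, so is_dp_dig_pixel_rate_div_policy()
 * may return true when the debug flag is set; an odd h_sync_width
 * (e.g. 45) would now veto the divide-by-2 path in dp_unblank. */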
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 250d9a341cf6..ecd041a446d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -71,7 +71,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@@ -93,7 +95,7 @@
SRI(DIG_FIFO_CTRL0, DIG, id)
-#define SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)\
+#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\
@@ -106,6 +108,7 @@
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
@@ -244,15 +247,6 @@
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh),\
- SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh)
-#else
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)
-#endif
-
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
index 9db1323e1933..176b1537d2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -47,6 +47,7 @@
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index f6d3da475835..9fbb72369c10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -936,6 +936,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.program_watermarks = hubbub32_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub32_init_watermarks,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 2038cbda33f7..ac1c6458dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -79,6 +79,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp)
uint32_t reg_val;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* For phantom pipe enable, disable GSL */
+ REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, 0);
REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, 1);
reg_val = REG_READ(DCHUBP_CNTL);
if (reg_val) {
@@ -179,12 +181,12 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
- .hubp_set_flip_int = hubp1_set_flip_int
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index a750343ca521..cf5bd9713f54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -206,8 +206,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- uint8_t i;
- int j;
+ int i, j;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
uint32_t cursor_size = 0;
@@ -630,10 +629,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
params = &dpp_base->degamma_params;
}
- result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+ dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
- if (result &&
- pipe_ctx->stream_res.opp &&
+ if (pipe_ctx->stream_res.opp &&
pipe_ctx->stream_res.opp->ctx &&
hws->funcs.set_mcm_luts)
result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state);
@@ -991,6 +989,10 @@ void dcn32_init_hw(struct dc *dc)
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
}
+
+ /* Enable support for ODM and windowed MPO if policy flag is set */
+ if (dc->debug.enable_single_display_2to1_odm_policy)
+ dc->config.enable_windowed_mpo_odm = true;
}
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
@@ -1145,23 +1147,25 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
true);
}
- // Don't program pixel clock after link is already enabled
-/* if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings)) {
- BREAK_TO_DEBUGGER();
- }*/
+ if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
- if (pipe_ctx->stream_res.dsc)
update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
+
+ /* If the pipe is no longer used for ODM, the DSC on that pipe must be disconnected */
+ if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
+ current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
+ struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
+ /* disconnect DSC block from stream */
+ dsc->funcs->dsc_disconnect(dsc);
+ }
+ }
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
// For phantom pipes, use the same programming as the main pipes
@@ -1189,7 +1193,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -1226,7 +1230,6 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
uint32_t pix_per_cycle = 1;
params.opp_cnt = 1;
@@ -1245,7 +1248,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
pipe_ctx->stream_res.tg->inst);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) {
params.timing.pix_clk_100hz /= 2;
pix_per_cycle = 2;
}
@@ -1262,6 +1265,9 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
@@ -1394,7 +1400,7 @@ bool dcn32_dsc_pg_status(
break;
}
- return pwr_status == 0 ? true : false;
+ return pwr_status == 0;
}
void dcn32_update_dsc_pg(struct dc *dc,
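Condensed, the policy check in dcn32_is_dp_dig_pixel_rate_div_policy() now reads as the sketch below (not part of the patch; names as in the hunks above). The debug flag alone is no longer sufficient: the horizontal timing must also be evenly divisible.

/* Sketch of the post-patch predicate. */
bool dig_div_policy =
        is_h_timing_divisible_by_2(pipe_ctx->stream) &&      /* new veto */
        dc_is_dp_signal(pipe_ctx->stream->signal) &&
        !is_dp_128b_132b_signal(pipe_ctx) &&
        dc->debug.enable_dp_dig_pixel_rate_div_policy;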
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index ec3989d37086..2b33eeb213e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -151,7 +151,7 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
- 1, 100000);
+ 1, 150000);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 05de97ea855f..a88dd7b3d1c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1680,6 +1680,8 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+ phantom_plane->is_phantom = true;
+
dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
@@ -1749,6 +1751,10 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
pipe->stream->mall_stream_config.type = SUBVP_NONE;
pipe->stream->mall_stream_config.paired_stream = NULL;
}
+
+ if (pipe->plane_state) {
+ pipe->plane_state->is_phantom = false;
+ }
}
return removed_pipe;
}
@@ -1798,14 +1804,39 @@ bool dcn32_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ struct mall_temp_config mall_temp_config;
+
+ /* To handle FreeSync properly, set the FreeSync DML parameters
+ * to their default state for the first stage of validation
+ */
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+
DC_LOGGER_INIT(dc->ctx->logger);
+ /* For fast validation, there are situations where a shallow copy
+ * of the dc->current_state is created for the validation. In this case
+ * we want to save and restore the mall config because we always
+ * tear down subvp at the beginning of validation (and don't attempt
+ * to add it back if it's fast validation). If we don't restore the
+ * subvp config in cases of fast validation + shallow copy of the
+ * dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ */
+ if (fast_validate) {
+ memset(&mall_temp_config, 0, sizeof(mall_temp_config));
+ dcn32_save_mall_state(dc, context, &mall_temp_config);
+ }
+
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
+ if (fast_validate)
+ dcn32_restore_mall_state(dc, context, &mall_temp_config);
+
if (pipe_cnt == 0)
goto validate_out;
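The fast-validation flow added above reduces to the following save/validate/restore pattern (a condensed sketch, not part of the patch):

struct mall_temp_config saved;

if (fast_validate) {
        memset(&saved, 0, sizeof(saved));
        dcn32_save_mall_state(dc, context, &saved);     /* snapshot SubVP/MALL */
}

out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);

if (fast_validate)
        dcn32_restore_mall_state(dc, context, &saved);  /* undo the unconditional teardown */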
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 55945cca2260..f76120e67c16 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -45,6 +45,17 @@
extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
+/* Temp struct used to save and restore MALL config
+ * during validation.
+ *
+ * TODO: Move MALL config into dc_state instead of stream struct
+ * to avoid needing to save/restore.
+ */
+struct mall_temp_config {
+ struct mall_stream_config mall_stream_config[MAX_PIPES];
+ bool is_phantom_plane[MAX_PIPES];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -108,6 +119,8 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
const struct resource_pool *pool,
@@ -120,6 +133,15 @@ void dcn32_determine_det_override(struct dc *dc,
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
+
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index a2a70a1572b7..d51d0c40ae5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -233,6 +233,23 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}
+
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
+ return true;
+ }
+ return false;
+}
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -363,3 +380,74 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
} else
dcn32_determine_det_override(dc, context, pipes);
}
+
+/**
+ * *******************************************************************************************
+ * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases
+ *
+ * This function saves the MALL (SubVP) state for fast validation cases. For fast validation,
+ * there are situations where a shallow copy of the dc->current_state is created for the
+ * validation. In this case we want to save and restore the mall config because we always
+ * tear down subvp at the beginning of validation (and don't attempt to add it back if it's
+ * fast validation). If we don't restore the subvp config in cases of fast validation +
+ * shallow copy of the dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ *
+ * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
+ * validation. We don't expect this to happen in fast_validation=1 cases.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [out]: temp_config: struct used to cache the existing MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
+
+ if (pipe->plane_state)
+ temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
+ }
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases
+ *
+ * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in/out]: context: New DC state to be programmed, restore MALL state into here
+ * @param [in]: temp_config: struct that has the cached MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
+
+ if (pipe->plane_state)
+ pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 49682a31ecbd..fa9b6603cfd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -91,7 +91,6 @@ static const struct link_encoder_funcs dcn321_link_enc_funcs = {
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn321_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index aed0f689cbbf..61087f2385a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -94,8 +94,6 @@
#include "dcn20/dcn20_vmid.h"
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
enum dcn321_clk_src_array_id {
DCN321_CLK_SRC_PLL0,
@@ -1606,7 +1604,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1656,7 +1654,7 @@ static bool dcn321_resource_construct(
#undef REG_STRUCT
#define REG_STRUCT dccg_regs
- dccg_regs_init();
+ dccg_regs_init();
ctx->dc_bios->regs = &bios_regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index d46adc849d2a..e73f089c84bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1444,81 +1444,67 @@ unsigned int dcn_find_dcfclk_suits_all(
return dcf_clk;
}
-static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks)
{
- int i;
-
- if (clks->num_levels == 0)
- return false;
-
- for (i = 0; i < clks->num_levels; i++)
- /* Ensure that the result is sane */
- if (clks->data[i].clocks_in_khz == 0)
- return false;
+ unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
- return true;
+ ASSERT(fclks->num_levels);
+
+ vmin0p65_idx = 0;
+ vmid0p72_idx = fclks->num_levels -
+ (fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+ vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+ vmax0p9_idx = fclks->num_levels - 1;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmid0p72_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vnom0p8_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmax0p9_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
}
-void dcn_bw_update_from_pplib(struct dc *dc)
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks)
{
- struct dc_context *ctx = dc->ctx;
- struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
- bool res;
- unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
-
- /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
-
- if (res)
- res = verify_clock_values(&fclks);
-
- if (res) {
- ASSERT(fclks.num_levels);
-
- vmin0p65_idx = 0;
- vmid0p72_idx = fclks.num_levels -
- (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
- vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
- vmax0p9_idx = fclks.num_levels - 1;
-
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
- 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
-
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
-
- if (res)
- res = verify_clock_values(&dcfclks);
+ if (dcfclks->num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = dcfclks->data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = dcfclks->data[dcfclks->num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = dcfclks->data[dcfclks->num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = dcfclks->data[dcfclks->num_levels - 1].clocks_in_khz / 1000.0;
+ }
+}
- if (res && dcfclks.num_levels >= 3) {
- dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz)
+{
+ *min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ *min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ *socclk_khz = dc->dcn_soc->socclk * 1000;
}
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz)
{
struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
- int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
if (dc->res_pool->pp_smu)
@@ -1526,10 +1512,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
- socclk_khz = dc->dcn_soc->socclk * 1000;
-
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
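With dcn_bw_update_from_pplib() split apart, a caller is expected to stitch the helpers together roughly as follows. This is a sketch, not part of the patch; it assumes the pplib query shown in the removed code is still used by the caller, and error handling is elided.

struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;

if (dm_pp_get_clock_levels_by_type_with_voltage(ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks))
        dcn_bw_update_from_pplib_fclks(dc, &fclks);

if (dm_pp_get_clock_levels_by_type_with_voltage(ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks))
        dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);

dcn_get_soc_clks(dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
dcn_bw_notify_pplib_of_wm_ranges(dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);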
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index b6e99eefe869..7dd0845d1bd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -292,6 +292,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
.num_chans = 4,
+ .dummy_pstate_latency_us = 10.0
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -459,13 +460,30 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
}
}
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
+ /* For 315 pstate change is only supported if possible in vactive */
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[context->bw_ctx.dml.vba.VoltageLevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_vactive)
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+ else
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
+ }
+}
+
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_dpp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -486,72 +504,6 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-#if 0 // TODO
- /* Set B:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set C:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Set D:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
/* Set A:
* All clocks min required
*
@@ -568,16 +520,17 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* TODO: remove: */
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- /* end remove*/
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_dpp_count++;
+
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -594,6 +547,9 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ /* For 31x APUs, pstate change is only supported if possible in vactive or if there are no active dpps */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -739,7 +695,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
else
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
}
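The DCN3.15 WM_A update added above boils down to the latency selection sketched below (not part of the patch; vlevel and maxMpcComb stand in for the VBA indices spelled out in the hunk):

bool vactive_ok =
        context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb]
                == dm_dram_clock_change_vactive;

context->bw_ctx.dml.soc.dram_clock_change_latency_us = vactive_ok
        ? dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us
        : context->bw_ctx.dml.soc.dummy_pstate_latency_us;  /* 10.0 us per dcn3_15_soc */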
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 4372f17b55d4..fd58b2561ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -35,6 +35,7 @@ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index 8dfe639b6508..b612edb14417 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -43,6 +43,8 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN31_MAX_DSC_IMAGE_WIDTH 5184
#define DCN31_MAX_FMT_420_BUFFER_WIDTH 4096
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_15_MAX_DET_SIZE 384
// For DML-C changes that haven't been propagated to VBA yet
//#define __DML_VBA_ALLOW_DELTA__
@@ -3775,6 +3777,17 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+{
+ int i, total_pipes = 0;
+ for (i = 0; i < NumberOfActivePlanes; i++)
+ total_pipes += NoOfDPPThisState[i];
+ *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+ if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
+ *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+}
+
+
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
@@ -4533,6 +4546,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
+ if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
+ PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
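A worked example for PatchDETBufferSizeInKByte() (values illustrative, not taken from the patch): with a 1088 KB config return buffer and 2 total DPPs, (1088 - 128) / 64 = 15, 15 / 2 = 7 in integer math, and 7 * 64 = 448 KB, which the clamp reduces to the 384 KB DCN3_15_MAX_DET_SIZE; with 4 DPPs the same math yields 192 KB and no clamping occurs.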
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 0571700f53f9..819de0f11012 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -243,7 +243,7 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
-/**
+/*
* Finds dummy_latency_index when MCLK switching using firmware based
* vblank stretch is enabled. This function will iterate through the
* table of dummy pstate latencies until the lowest value that allows
@@ -290,15 +290,14 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
- *
- * This function must be called AFTER the phantom pipes are added to context
- * and run through DML (so that the DLG params for the phantom pipes can be
- * populated), and BEFORE we program the timing for the phantom pipes.
- *
* @dc: [in] current dc state
* @context: [in] new dc state
* @pipes: [in] DML pipe params array
* @pipe_cnt: [in] DML pipe count
+ *
+ * This function must be called AFTER the phantom pipes are added to context
+ * and run through DML (so that the DLG params for the phantom pipes can be
+ * populated), and BEFORE we program the timing for the phantom pipes.
*/
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
struct dc_state *context,
@@ -331,8 +330,9 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
/**
- * *******************************************************************************************
- * dcn32_predict_pipe_split: Predict if pipe split will occur for a given DML pipe
+ * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
+ * @context: [in] New DC state to be programmed
+ * @pipe_e2e: [in] DML pipe end to end context
*
* This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
* ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
@@ -343,12 +343,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
* - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
* MPC is required
*
- * @param [in]: context: New DC state to be programmed
- * @param [in]: pipe_e2e: DML pipe end to end context
- *
- * @return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
- *
- * *******************************************************************************************
+ * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
*/
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
display_e2e_pipe_params_st *pipe_e2e)
@@ -504,7 +499,14 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
}
/**
- * dcn32_set_phantom_stream_timing: Set timing params for the phantom stream
+ * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
+ * @dc: current dc state
+ * @context: new dc state
+ * @ref_pipe: Main pipe for the phantom stream
+ * @phantom_stream: target phantom stream state
+ * @pipes: DML pipe params
+ * @pipe_cnt: number of DML pipes
+ * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*
* Set timing params of the phantom stream based on calculated output from DML.
* This function first gets the DML pipe index using the DC pipe index, then
@@ -517,13 +519,6 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
* that separately.
*
* - Set phantom backporch = vstartup of main pipe
- *
- * @dc: current dc state
- * @context: new dc state
- * @ref_pipe: Main pipe for the phantom stream
- * @pipes: DML pipe params
- * @pipe_cnt: number of DML pipes
- * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*/
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
@@ -592,16 +587,14 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
}
/**
- * dcn32_get_num_free_pipes: Calculate number of free pipes
+ * dcn32_get_num_free_pipes - Calculate number of free pipes
+ * @dc: current dc state
+ * @context: new dc state
*
* This function assumes that a "used" pipe is a pipe that has
* both a stream and a plane assigned to it.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * Number of free pipes available in the context
+ * Return: Number of free pipes available in the context
*/
static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
{
@@ -625,7 +618,10 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
}
/**
- * dcn32_assign_subvp_pipe: Function to decide which pipe will use Sub-VP.
+ * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
+ * @dc: current dc state
+ * @context: new dc state
+ * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
*
* We enter this function if we are Sub-VP capable (i.e. enough pipes available)
* and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
@@ -639,12 +635,7 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
* for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
* support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
*
- * @param dc: current dc state
- * @param context: new dc state
- * @param index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
- *
- * Return:
- * True if a valid pipe assignment was found for Sub-VP. Otherwise false.
+ * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
*/
static bool dcn32_assign_subvp_pipe(struct dc *dc,
struct dc_state *context,
@@ -711,7 +702,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
}
/**
- * dcn32_enough_pipes_for_subvp: Function to check if there are "enough" pipes for SubVP.
+ * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
+ * @dc: current dc state
+ * @context: new dc state
*
* This function returns true if there are enough free pipes
* to create the required phantom pipes for any given stream
@@ -723,9 +716,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* pipe which can be used as the phantom pipe for the non pipe
* split pipe.
*
- * @dc: current dc state
- * @context: new dc state
- *
* Return:
* True if there are enough free pipes to assign phantom pipes to at least one
* stream that does not already have phantom pipes assigned. Otherwise false.
@@ -764,7 +754,9 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
}
/**
- * subvp_subvp_schedulable: Determine if SubVP + SubVP config is schedulable
+ * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Find longest microschedule length (in us) between the two SubVP pipes
@@ -772,11 +764,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
* pipes still allows for the maximum microschedule to fit in the active
* region for both pipes.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + SubVP config is schedulable, false otherwise
+ * Return: True if the SubVP + SubVP config is schedulable, false otherwise
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -836,7 +824,10 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_drr_schedulable: Determine if SubVP + DRR config is schedulable
+ * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
+ * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
@@ -845,12 +836,7 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
* then report the configuration as supported
*
- * @dc: current dc state
- * @context: new dc state
- * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
- *
- * Return:
- * bool - True if the SubVP + DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + DRR config is schedulable, false otherwise
*/
static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
{
@@ -914,7 +900,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
/**
- * subvp_vblank_schedulable: Determine if SubVP + VBLANK config is schedulable
+ * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
@@ -922,11 +910,7 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
* then report the configuration as supported
* 3. If the VBLANK display is DRR, then take the DRR static schedulability path
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
*/
static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -1003,20 +987,18 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_validate_static_schedulability: Check which SubVP case is calculated and handle
- * static analysis based on the case.
+ * subvp_validate_static_schedulability - Check which SubVP case is calculated
+ * and handle static analysis based on the case.
+ * @dc: current dc state
+ * @context: new dc state
+ * @vlevel: Voltage level calculated by DML
*
* Three cases:
* 1. SubVP + SubVP
* 2. SubVP + VBLANK (DRR checked internally)
* 3. SubVP + VACTIVE (currently unsupported)
*
- * @dc: current dc state
- * @context: new dc state
- * @vlevel: Voltage level calculated by DML
- *
- * Return:
- * bool - True if statically schedulable, false otherwise
+ * Return: True if statically schedulable, false otherwise
*/
static bool subvp_validate_static_schedulability(struct dc *dc,
struct dc_state *context,
@@ -1115,7 +1097,8 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
- !dcn32_mpo_in_use(context) && (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
+ (*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
@@ -1597,6 +1580,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
/*MPC split rules will handle this case*/
pipe->bottom_pipe->top_pipe = NULL;
} else {
+ /* when merging ODM pipes, the bottom MPC pipe must now point to
+ * the previous ODM pipe and its associated stream assets
+ */
if (pipe->prev_odm_pipe->bottom_pipe) {
/* 3 plane MPO*/
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
@@ -1606,6 +1592,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
}
+
+ memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
}
}
@@ -1781,6 +1769,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
unsigned int dummy_latency_index = 0;
@@ -1816,7 +1805,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] !=
dm_dram_clock_change_unsupported;
}
@@ -1902,6 +1891,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
+ }
+
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts_margin = 160;
@@ -2275,7 +2268,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
return 0;
}
-/**
+/*
* dcn32_update_bw_bounding_box
*
* This would override some dcn3_2 ip_or_soc initial parameters hardcoded from
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 75be1e1ce543..5b91660a6496 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -733,6 +733,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.FCLKChangeLatency, v->UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, 0, sizeof(DmlPipe));
+
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dppclk = mode_lib->vba.DPPCLK[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dispclk = mode_lib->vba.DISPCLK;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
@@ -2252,9 +2254,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
- || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0
- || mode_lib->vba.DSCInputBitPerComponent[k] >
- mode_lib->vba.MaximumDSCBitsPerComponent)) {
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)
+ || mode_lib->vba.DSCInputBitPerComponent[k] > mode_lib->vba.MaximumDSCBitsPerComponent) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
@@ -2330,16 +2331,15 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
- if (mode_lib->vba.OutputMultistreamId[k] == j && mode_lib->vba.OutputMultistreamEn[k]
+ if (mode_lib->vba.OutputMultistreamId[k] == j
&& mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
}
}
if ((mode_lib->vba.Output[k] == dm_edp || mode_lib->vba.Output[k] == dm_hdmi)) {
- if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.OutputMultistreamEn[k])
+ if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
-
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == j)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
@@ -2478,8 +2478,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]);
}
- m = 0;
-
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActiveSurfaces - 1; j++) {
@@ -2856,8 +2854,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- m = 0;
-
//Calculate Return BW
for (i = 0; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
@@ -3618,11 +3614,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ModeIsSupported = mode_lib->vba.ModeSupport[i][0] == true
|| mode_lib->vba.ModeSupport[i][1] == true;
- if (mode_lib->vba.ModeSupport[i][0] == true) {
+ if (mode_lib->vba.ModeSupport[i][0] == true)
MaximumMPCCombine = 0;
- } else {
+ else
MaximumMPCCombine = 1;
- }
}
}
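
Editor's note on the DSC hunk above: it fixes an operator-grouping bug. Previously the `> MaximumDSCBitsPerComponent` comparison sat inside the negated bracket, so an out-of-range value made the whole negation false and the BPC was wrongly accepted. A minimal sketch of the corrected predicate (the function name is hypothetical):

	/* A BPC is unsupported unless it is exactly 8, 10 or 12, and even a
	 * valid value must not exceed the platform maximum. */
	static bool dsc_input_bpc_unsupported(double bpc, double max_bpc)
	{
		return !(bpc == 12.0 || bpc == 10.0 || bpc == 8.0) || bpc > max_bpc;
	}
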
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index f5400eda07a5..4125d3d111d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -114,6 +114,7 @@ void dml_init_instance(struct display_mode_lib *lib,
break;
case DML_PROJECT_DCN31:
case DML_PROJECT_DCN31_FPGA:
+ case DML_PROJECT_DCN315:
lib->funcs = dml31_funcs;
break;
case DML_PROJECT_DCN314:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index b1878a1440e2..3d643d50c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -40,6 +40,7 @@ enum dml_project {
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
+ DML_PROJECT_DCN315,
DML_PROJECT_DCN31_FPGA,
DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8919a2092ac5..9498105c98ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,6 +39,8 @@
#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
+#define MAX_SVP_PHANTOM_STREAMS 2
+#define MAX_SVP_PHANTOM_PLANES 2
void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
uint32_t controller_id);
@@ -232,6 +234,7 @@ struct resource_funcs {
unsigned int index);
bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
+ void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
};
struct audio_support{
@@ -438,7 +441,6 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
- bool vtp_locked;
};
/* Data used for dynamic link encoder assignment.
@@ -492,6 +494,8 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
+ unsigned int legacy_svp_drr_stream_index;
+ bool legacy_svp_drr_stream_index_valid;
};
union bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..9e4ddc985240 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -628,8 +628,23 @@ unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
struct dc_clocks *clocks);
-void dcn_bw_update_from_pplib(struct dc *dc);
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz);
+
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks);
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks);
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index d9f1b0a4fbd4..591ab1389e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -95,10 +95,23 @@ struct clk_limit_table_entry {
unsigned int wck_ratio;
};
+struct clk_limit_num_entries {
+ unsigned int num_dcfclk_levels;
+ unsigned int num_fclk_levels;
+ unsigned int num_memclk_levels;
+ unsigned int num_socclk_levels;
+ unsigned int num_dtbclk_levels;
+ unsigned int num_dispclk_levels;
+ unsigned int num_dppclk_levels;
+ unsigned int num_phyclk_levels;
+ unsigned int num_phyclk_d18_levels;
+};
+
/* This table is contiguous */
struct clk_limit_table {
struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
- unsigned int num_entries;
+ struct clk_limit_num_entries num_entries_per_clk;
+ unsigned int num_entries; /* highest populated dpm level, for backward compatibility */
};
struct wm_range_table_entry {
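
Editor's note: splitting the single num_entries counter into per-clock level counts lets iteration code bound itself by the clock it actually cares about, since different clocks can expose different numbers of DPM levels. A minimal sketch, assuming struct clk_limit_table_entry carries a dcfclk_mhz field as in this header (the helper name is hypothetical):

	static unsigned int max_dcfclk_mhz(const struct clk_limit_table *table)
	{
		unsigned int i, max_mhz = 0;

		/* walk only the populated DCFCLK levels; num_entries still
		 * reports the highest populated level for unconverted callers */
		for (i = 0; i < table->num_entries_per_clk.num_dcfclk_levels; i++)
			if (table->entries[i].dcfclk_mhz > max_mhz)
				max_mhz = table->entries[i].dcfclk_mhz;

		return max_mhz;
	}
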
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
new file mode 100644
index 000000000000..45645f9fd86c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2022 Advanced Micro Devices, Inc. All rights reserved. */
+
+#ifndef __DAL_CURSOR_CACHE_H__
+#define __DAL_CURSOR_CACHE_H__
+
+union reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_hubp {
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_position_cfg {
+ struct {
+ uint32_t x_pos: 16;
+ uint32_t y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union reg_hot_spot_cfg {
+ struct {
+ uint32_t x_hot: 16;
+ uint32_t y_hot: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+struct cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+
+struct cursor_rect {
+ uint32_t x;
+ uint32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+union reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attribute_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attributes_cfg {
+ struct cursor_attribute_cache_hubp aHubp;
+ struct cursor_attribute_cache_dpp aDpp;
+};
+
+#endif
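
Editor's note: the bits/raw unions in this new header let cached cursor register state be built field by field but compared and written as single 32-bit words. A minimal sketch of that caching pattern, assuming the header above (the helper is hypothetical, not part of the patch):

	#include "cursor_reg_cache.h"

	/* Returns true (and refreshes the cache) only when the packed control
	 * word changed, so the caller can skip a redundant register write. */
	static bool cursor_ctl_changed(union reg_cursor_control_cfg *cached,
				       const union reg_cursor_control_cfg *next)
	{
		if (cached->raw == next->raw)
			return false;

		cached->raw = next->raw;
		return true;
	}
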
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 3ef7faa92052..dcb80c4747b0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -28,6 +28,7 @@
#define __DAL_DPP_H__
#include "transform.h"
+#include "cursor_reg_cache.h"
union defer_reg_writes {
struct {
@@ -58,6 +59,9 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+
+ struct cursor_position_cache_dpp pos;
+ struct cursor_attribute_cache_dpp att;
};
struct dpp_input_csc_matrix {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 44c4578193a3..d5ea7545583e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -27,6 +27,7 @@
#define __DAL_HUBP_H__
#include "mem_input.h"
+#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -65,6 +66,10 @@ struct hubp {
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
bool power_gated;
+
+ struct cursor_position_cache_hubp pos;
+ struct cursor_attribute_cache_hubp att;
+ struct cursor_rect cur_rect;
};
struct surface_flip_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 72eef7a5ed83..25a1df45b264 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -209,7 +209,6 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
- bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c37d1141febe..5040836f404d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -230,4 +230,10 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 7d3147175ca2..153a88381f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -111,7 +111,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 9522fe0b36c9..4f7f99156897 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -37,7 +37,7 @@ void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
-void virtual_disable_link_output(struct dc_link *link,
+static void virtual_disable_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index f34c45b19fcb..eb5b7eb292ef 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -248,6 +248,7 @@ struct dmub_srv_hw_params {
bool disable_dpia;
bool usb4_cm_version;
bool fw_in_system_memory;
+ bool dpia_hpd_int_enable_supported;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 5d1aadade8a5..7a8f61517424 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -400,8 +400,9 @@ union dmub_fw_boot_options {
uint32_t diag_env: 1; /* 1 if diagnostic environment */
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
+ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t reserved : 17; /**< reserved */
+ uint32_t reserved : 16; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -728,6 +729,12 @@ enum dmub_cmd_type {
/**
* Command type used for all VBIOS interface commands.
*/
+
+ /**
+ * Command type used to set DPIA HPD interrupt state
+ */
+ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+
DMUB_CMD__VBIOS = 128,
};
@@ -760,11 +767,6 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
-enum dmub_cmd_header_sub_type {
- DMUB_CMD__SUB_TYPE_GENERAL = 0,
- DMUB_CMD__SUB_TYPE_CURSOR_POSITION = 1
-};
-
#pragma pack(push, 1)
/**
@@ -1261,6 +1263,14 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
};
/**
+ * DMUB command structure for DPIA HPD int enable control.
+ */
+struct dmub_rb_cmd_dpia_hpd_int_enable {
+ struct dmub_cmd_header header; /* header */
+ uint32_t enable; /* dpia hpd interrupt enable */
+};
+
+/**
* struct dmub_rb_cmd_dpphy_init - DPPHY init.
*/
struct dmub_rb_cmd_dpphy_init {
@@ -2089,7 +2099,99 @@ struct dmub_rb_cmd_update_dirty_rect {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
-struct dmub_cmd_update_cursor_info_data {
+union dmub_reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_hubp {
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_position_cfg {
+ struct {
+ uint32_t cur_x_pos: 16;
+ uint32_t cur_y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union dmub_reg_hot_spot_cfg {
+ struct {
+ uint32_t hot_x: 16;
+ uint32_t hot_y: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union dmub_reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+union dmub_reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_position_cfg {
+ struct dmub_cursor_position_cache_hubp pHubp;
+ struct dmub_cursor_position_cache_dpp pDpp;
+ uint8_t pipe_idx;
+ /*
+ * Padding is required to keep this struct 4-byte aligned.
+ */
+ uint8_t padding[3];
+};
+
+struct dmub_cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union dmub_reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+struct dmub_cursor_attribute_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_attributes_cfg {
+ struct dmub_cursor_attribute_cache_hubp aHubp;
+ struct dmub_cursor_attribute_cache_dpp aDpp;
+};
+
+struct dmub_cmd_update_cursor_payload0 {
/**
* Cursor dirty rects.
*/
@@ -2116,6 +2218,20 @@ struct dmub_cmd_update_cursor_info_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * Cursor Position Register.
+ * Registers contains Hubp & Dpp modules
+ */
+ struct dmub_cursor_position_cfg position_cfg;
+};
+
+struct dmub_cmd_update_cursor_payload1 {
+ struct dmub_cursor_attributes_cfg attribute_cfg;
+};
+
+union dmub_cmd_update_cursor_info_data {
+ struct dmub_cmd_update_cursor_payload0 payload0;
+ struct dmub_cmd_update_cursor_payload1 payload1;
};
/**
* Definition of a DMUB_CMD__UPDATE_CURSOR_INFO command.
@@ -2128,7 +2244,7 @@ struct dmub_rb_cmd_update_cursor_info {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
- struct dmub_cmd_update_cursor_info_data update_cursor_info_data;
+ union dmub_cmd_update_cursor_info_data update_cursor_info_data;
};
/**
@@ -2825,11 +2941,7 @@ struct dmub_rb_cmd_get_visual_confirm_color {
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
uint32_t tg_inst;
- uint32_t enable_manual_trigger;
- uint32_t clear_force_vsync;
};
struct dmub_rb_cmd_drr_update {
@@ -3235,6 +3347,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_HPD_STATE command.
*/
struct dmub_rb_cmd_query_hpd_state query_hpd;
+ /**
+ * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
+ */
+ struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
};
/**
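
Editor's note: both dmub_cmd.h changes above preserve a layout contract shared with firmware. The new boot-option bit is paid for by shrinking "reserved" from 17 to 16 so the union stays 32 bits, and the cursor payloads become a union so either payload fits the same command slot. A compile-time check in that spirit might look like this (a sketch, not taken from the source):

	#include <linux/build_bug.h>
	#include "dmub_cmd.h"

	static inline void dmub_abi_size_checks(void)
	{
		/* adding a bit without shrinking "reserved" would trip this */
		BUILD_BUG_ON(sizeof(union dmub_fw_boot_options) != sizeof(uint32_t));
	}
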
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c7bd7e216710..c90b9ee42e12 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -350,6 +350,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
+ boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 04f7656906ca..447a0ec9cbe2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1692,7 +1692,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
- build_coefficients(&coeff, true);
+ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
index b798cf5a2c39..38adde3cae5a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
@@ -29,5 +29,7 @@
#define regMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 2
#define regMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
#define regMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 2
+#define regUMCCH0_0_GeccCtrl 0x0053
+#define regUMCCH0_0_GeccCtrl_BASE_IDX 2
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
index bd99b431247f..4dbec524f943 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
@@ -90,5 +90,8 @@
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+//UMCCH0_0_GeccCtrl
+#define UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT 0xd
+#define UMCCH0_0_GeccCtrl__UCFatalEn_MASK 0x00002000L
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8fd0782a2b20..f5e08b60f66e 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1384,13 +1384,16 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
+ int err;
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- amdgpu_kv_smc_bapm_enable(adev, false);
+ err = amdgpu_kv_smc_bapm_enable(adev, false);
+ if (err)
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e4fcbf8a7eb5..7ef7e81525a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3603,7 +3603,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count <=
+ (smu7_power_state->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
return -EINVAL);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99bfe5efe171..c8c9fb827bda 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3155,7 +3155,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
return -1);
PP_ASSERT_WITH_CODE(
- (vega10_ps->performance_level_count <=
+ (vega10_ps->performance_level_count <
hwmgr->platform_descriptor.
hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5fbd2ae95869..2b73f5ff0d02 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -120,7 +120,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
- dotclock = pipe_config->port_clock * 2 / 3;
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
else
dotclock = pipe_config->port_clock;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dd008ba8afe3..461c62c88413 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -8130,6 +8130,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
+static int max_dotclock(struct drm_i915_private *i915)
+{
+ int max_dotclock = i915->max_dotclk_freq;
+
+ /* icl+ might use bigjoiner */
+ if (DISPLAY_VER(i915) >= 11)
+ max_dotclock *= 2;
+
+ return max_dotclock;
+}
+
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -8167,6 +8178,13 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ /*
+ * Reject clearly excessive dotclocks early to
+ * avoid having to worry about huge integers later.
+ */
+ if (mode->clock > max_dotclock(dev_priv))
+ return MODE_CLOCK_HIGH;
+
/* Transcoder timing limits */
if (DISPLAY_VER(dev_priv) >= 11) {
hdisplay_max = 16384;
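
Editor's note: max_dotclock() above doubles the platform dotclock ceiling on icl+ because bigjoiner can split one mode across two pipes, and rejecting over-limit modes early in intel_mode_valid() keeps later arithmetic within comfortable integer ranges. A minimal standalone sketch of the same check (illustrative, not real platform limits):

	static bool mode_clock_ok(int mode_clock_khz, int max_dotclk_khz,
				  bool can_bigjoin)
	{
		int limit = max_dotclk_khz;

		if (can_bigjoin)
			limit *= 2;	/* two pipes share the scanout work */

		return mode_clock_khz <= limit;
	}
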
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c86e5d4ee016..1dddd6abd77b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
u32 alignment;
int ret;
+ /*
+ * We are not syncing against the binding (and potential migrations)
+ * below, so this vm must never be async.
+ */
+ GEM_WARN_ON(vm->bind_async_flags);
+
if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
@@ -37,29 +44,48 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (!ret) {
+ for_i915_gem_ww(&ww, ret, true) {
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ continue;
+
+ if (HAS_LMEM(dev_priv)) {
+ unsigned int flags = obj->flags;
+
+ /*
+ * For this type of buffer we need to be able to read from the CPU
+ * the clear color value found in the buffer, hence we need to
+ * ensure it is always in the mappable part of lmem, if this is
+ * a small-bar device.
+ */
+ if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+ flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+ flags);
+ if (ret)
+ continue;
+ }
+
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- i915_gem_object_unlock(obj);
- }
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
+ if (ret)
+ continue;
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ continue;
+ }
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind_unlocked(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ continue;
}
- }
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+ if (ret)
+ continue;
+ }
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9def8d9fade6..d4cce627d7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -116,34 +116,56 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
}
}
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder trans_shift;
i915_reg_t imr_reg;
u32 mask, val;
- /*
- * gen12+ has registers relative to transcoder and one per transcoder
- * using the same bit definition: handle it as TRANSCODER_EDP to force
- * 0 shift in bit definition
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- mask = EDP_PSR_ERROR(trans_shift);
+ mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
- mask |= EDP_PSR_POST_EXIT(trans_shift) |
- EDP_PSR_PRE_ENTRY(trans_shift);
+ mask |= psr_irq_post_exit_bit_get(intel_dp) |
+ psr_irq_pre_entry_bit_get(intel_dp);
- /* Warning: it is masking/setting reserved bits too */
val = intel_de_read(dev_priv, imr_reg);
- val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val &= ~psr_irq_mask_get(intel_dp);
val |= ~mask;
intel_de_write(dev_priv, imr_reg, val);
}
@@ -191,25 +213,21 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ktime_t time_ns = ktime_get();
- enum transcoder trans_shift;
i915_reg_t imr_reg;
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
@@ -226,7 +244,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
}
- if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
u32 val;
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
@@ -243,7 +261,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* or unset irq_aux_error.
*/
val = intel_de_read(dev_priv, imr_reg);
- val |= EDP_PSR_ERROR(trans_shift);
+ val |= psr_irq_psr_error_bit_get(intel_dp);
intel_de_write(dev_priv, imr_reg, val);
schedule_work(&intel_dp->psr.work);
@@ -1194,14 +1212,12 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate so lets keep PSR disabled
* to avoid any rendering problems.
*/
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12)
val = intel_de_read(dev_priv,
TRANS_PSR_IIR(intel_dp->psr.transcoder));
- val &= EDP_PSR_ERROR(0);
- } else {
+ else
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
- }
+ val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 01b0932757ed..18178b01375e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1710,10 +1710,22 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
modifier == I915_FORMAT_MOD_4_TILED ||
modifier == I915_FORMAT_MOD_Yf_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0bcde53c50c6..1e29b1e6d186 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1387,14 +1387,8 @@ kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
*/
for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
- bool skip = false;
- if (exit)
- skip = intel_context_set_exiting(ce);
- else if (!persistent)
- skip = intel_context_exit_nonpersistent(ce, NULL);
-
- if (skip)
+ if ((exit || !persistent) && intel_context_revoke(ce))
continue; /* Already marked. */
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cd75b0ca2555..845023c14eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2424,7 +2424,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_int() % num_vcs_engines(dev_priv);
+ prandom_u32_max(num_vcs_engines(dev_priv));
return file_priv->bsd_engine;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7ff9c7877bec..369006c5317f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -653,6 +653,41 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id)
{
+ return __i915_gem_object_migrate(obj, ww, id, obj->flags);
+}
+
+/**
+ * __i915_gem_object_migrate - Migrate an object to the desired region id, with
+ * control of the extra flags
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ * @flags: The object flags. Normally just obj->flags.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned, (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags)
+{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_memory_region *mr;
@@ -672,7 +707,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
return 0;
}
- return obj->ops->migrate(obj, mr);
+ return obj->ops->migrate(obj, mr, flags);
}
/**
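
Editor's note: per the kernel-doc above, the flags-taking variant exists so callers such as intel_pin_fb_obj_dpt() can drop I915_BO_ALLOC_GPU_ONLY when the CPU must read the buffer. A minimal usage sketch under the documented locking rules (the wrapper name is hypothetical; obj is assumed locked via @ww):

	static int pin_in_mappable_lmem(struct drm_i915_gem_object *obj,
					struct i915_gem_ww_ctx *ww)
	{
		int ret;

		ret = __i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0,
						obj->flags & ~I915_BO_ALLOC_GPU_ONLY);
		if (ret)
			return ret;

		/* the copy may still be in flight; wait before any CPU access */
		return i915_gem_object_wait_migration(obj, 0);
	}
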
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7317d4102955..1723af9b0f6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -608,6 +608,10 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id);
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags);
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
enum intel_region_id id);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 40305e2bcd49..d0d6772e6f36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -107,7 +107,8 @@ struct drm_i915_gem_object_ops {
* pinning or for as long as the object lock is held.
*/
int (*migrate)(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr);
+ struct intel_memory_region *mr,
+ unsigned int flags);
void (*release)(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e3fc38dd5db0..4f861782c3e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -848,9 +848,10 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
}
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr)
+ struct intel_memory_region *mr,
+ unsigned int flags)
{
- return __i915_ttm_migrate(obj, mr, obj->flags);
+ return __i915_ttm_migrate(obj, mr, flags);
}
static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 654a092ed3d6..e94365b08f1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -614,13 +614,12 @@ bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
return ret;
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq)
+bool intel_context_revoke(struct intel_context *ce)
{
bool ret = intel_context_set_exiting(ce);
if (ce->ops->revoke)
- ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+ ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 8e2d70630c49..be09fb2e883a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -329,8 +329,7 @@ static inline bool intel_context_set_exiting(struct intel_context *ce)
return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq);
+bool intel_context_revoke(struct intel_context *ce);
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 30cf5c3369d9..2049a00417af 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1275,10 +1275,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- if (!retained_ptes)
+ if (!retained_ptes) {
+ /*
+ * Clear the bound flags of the vma resource to allow
+ * ptes to be repopulated.
+ */
+ vma->resource->bound_flags = 0;
vma->ops->bind_vma(vm, NULL, vma->resource,
obj ? obj->cache_level : 0,
was_bound);
+ }
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c6ebe2781076..152244d7f62a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -207,6 +207,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
+ /* Bypass LLC - Uncached (EHL+) */ \
+ MOCS_ENTRY(16, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_1_UC), \
+ /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+ MOCS_ENTRY(17, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 22ba66e48a9b..1db59eeb34db 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -684,7 +684,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
* Corner case where requests were sitting in the priority list or a
* request resubmitted after the context was banned.
*/
- if (unlikely(intel_context_is_banned(ce))) {
+ if (unlikely(!intel_context_is_schedulable(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine);
return 0;
@@ -870,15 +870,15 @@ static int guc_wq_item_append(struct intel_guc *guc,
struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
- int ret = 0;
+ int ret;
- if (likely(!intel_context_is_banned(ce))) {
- ret = __guc_wq_item_append(rq);
+ if (unlikely(!intel_context_is_schedulable(ce)))
+ return 0;
- if (unlikely(ret == -EBUSY)) {
- guc->stalled_request = rq;
- guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
- }
+ ret = __guc_wq_item_append(rq);
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
}
return ret;
@@ -897,7 +897,7 @@ static bool multi_lrc_submit(struct i915_request *rq)
* submitting all the requests generated in parallel.
*/
return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
- intel_context_is_banned(ce);
+ !intel_context_is_schedulable(ce);
}
static int guc_dequeue_one_context(struct intel_guc *guc)
@@ -966,7 +966,7 @@ register_context:
struct intel_context *ce = request_to_scheduling_context(last);
if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
- !intel_context_is_banned(ce))) {
+ intel_context_is_schedulable(ce))) {
ret = try_context_registration(ce, false);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
@@ -1576,7 +1576,7 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
{
struct intel_engine_cs *engine = __context_to_physical_engine(ce);
- if (intel_context_is_banned(ce))
+ if (!intel_context_is_schedulable(ce))
return;
GEM_BUG_ON(!intel_context_is_pinned(ce));
@@ -4424,12 +4424,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- if (likely(!intel_context_is_banned(ce))) {
+ if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
} else {
drm_info(&guc_to_gt(guc)->i915->drm,
- "Ignoring context reset notification of banned context 0x%04X on %s",
+ "Ignoring context reset notification of exiting context 0x%04X on %s",
ce->guc_id.id, ce->engine->name);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 329ff75b80b9..7bd1861ddbdf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -137,12 +137,12 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
range = round_down(end - len, align) - round_up(start, align);
if (range) {
if (sizeof(unsigned long) == sizeof(u64)) {
- addr = get_random_long();
+ addr = get_random_u64();
} else {
- addr = get_random_int();
+ addr = get_random_u32();
if (range > U32_MAX) {
addr <<= 32;
- addr |= get_random_int();
+ addr |= get_random_u32();
}
}
div64_u64_rem(addr, range, &addr);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1a9bd829fc7e..0b287a59dc2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2157,10 +2157,18 @@
#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
-#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+#define TGL_PSR_MASK REG_GENMASK(2, 0)
+#define TGL_PSR_ERROR REG_BIT(2)
+#define TGL_PSR_POST_EXIT REG_BIT(1)
+#define TGL_PSR_PRE_ENTRY REG_BIT(0)
+#define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \
+ _EDP_PSR_TRANS_SHIFT(trans))
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
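
Editor's note: the rewritten PSR definitions keep the gen12+ bits fixed (TGL_*) and derive the legacy shared-register variants (EDP_*) by shifting them per transcoder. Expanded by hand for pre-gen12 transcoder A (shift 8), assuming REG_BIT()/REG_GENMASK() behave like BIT()/GENMASK():

	/* TGL_PSR_MASK  = REG_GENMASK(2, 0) = 0x7
	 * TGL_PSR_ERROR = REG_BIT(2)        = 0x4
	 * _EDP_PSR_TRANS_SHIFT(TRANSCODER_A) = (A - A + 1) * 8 = 8
	 * EDP_PSR_ERROR(TRANSCODER_A) = 0x4 << 8 = 0x400
	 */
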
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index c4e932368b37..39da0fb0d6d2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -135,7 +135,7 @@ static int __run_selftests(const char *name,
int err = 0;
while (!i915_selftest.random_seed)
- i915_selftest.random_seed = get_random_int();
+ i915_selftest.random_seed = get_random_u32();
i915_selftest.timeout_jiffies =
i915_selftest.timeout_ms ?
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 16356611b5b9..5fe209107246 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -139,44 +139,24 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
}
}
-static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
- struct vm_fault *vmf, struct migrate_vma *args,
- dma_addr_t *dma_addr)
+static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
+ struct page *dpage, dma_addr_t *dma_addr)
{
struct device *dev = drm->dev->dev;
- struct page *dpage, *spage;
- struct nouveau_svmm *svmm;
-
- spage = migrate_pfn_to_page(args->src[0]);
- if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
- return 0;
- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
- if (!dpage)
- return VM_FAULT_SIGBUS;
lock_page(dpage);
*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
- goto error_free_page;
+ return -EIO;
- svmm = spage->zone_device_data;
- mutex_lock(&svmm->mutex);
- nouveau_svmm_invalidate(svmm, args->start, args->end);
if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
- NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
- goto error_dma_unmap;
- mutex_unlock(&svmm->mutex);
+ NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
+ dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ return -EIO;
+ }
- args->dst[0] = migrate_pfn(page_to_pfn(dpage));
return 0;
-
-error_dma_unmap:
- mutex_unlock(&svmm->mutex);
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-error_free_page:
- __free_page(dpage);
- return VM_FAULT_SIGBUS;
}
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
@@ -184,9 +164,11 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
struct nouveau_drm *drm = page_to_drm(vmf->page);
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
+ struct nouveau_svmm *svmm;
+ struct page *spage, *dpage;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
- vm_fault_t ret;
+ vm_fault_t ret = 0;
struct migrate_vma args = {
.vma = vmf->vma,
.start = vmf->address,
@@ -207,9 +189,25 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!args.cpages)
return 0;
- ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
- if (ret || dst == 0)
+ spage = migrate_pfn_to_page(src);
+ if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ goto done;
+
+ dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ if (!dpage)
+ goto done;
+
+ dst = migrate_pfn(page_to_pfn(dpage));
+
+ svmm = spage->zone_device_data;
+ mutex_lock(&svmm->mutex);
+ nouveau_svmm_invalidate(svmm, args.start, args.end);
+ ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
+ mutex_unlock(&svmm->mutex);
+ if (ret) {
+ ret = VM_FAULT_SIGBUS;
goto done;
+ }
nouveau_fence_new(dmem->migrate.chan, false, &fence);
migrate_vma_pages(&args);
@@ -326,7 +324,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
return NULL;
}
- lock_page(page);
+ zone_device_page_init(page);
return page;
}
@@ -369,6 +367,52 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
mutex_unlock(&drm->dmem->mutex);
}
+/*
+ * Evict all pages mapping a chunk.
+ */
+static void
+nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+{
+ unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
+ unsigned long *src_pfns, *dst_pfns;
+ dma_addr_t *dma_addrs;
+ struct nouveau_fence *fence;
+
+ src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+ dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+ dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+
+ migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
+ npages);
+
+ for (i = 0; i < npages; i++) {
+ if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
+ struct page *dpage;
+
+ /*
+ * __GFP_NOFAIL because the GPU is going away and there
+ * is nothing sensible we can do if we can't copy the
+ * data back.
+ */
+ dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ nouveau_dmem_copy_one(chunk->drm,
+ migrate_pfn_to_page(src_pfns[i]), dpage,
+ &dma_addrs[i]);
+ }
+ }
+
+ nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ nouveau_dmem_fence_done(&fence);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kfree(src_pfns);
+ kfree(dst_pfns);
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ kfree(dma_addrs);
+}
+
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
@@ -380,8 +424,10 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+ nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
nouveau_bo_ref(NULL, &chunk->bo);
+ WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
release_mem_region(chunk->pagemap.range.start,
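
Editor's note: nouveau_dmem_evict_chunk() above follows the three-step migrate_device_* protocol, so that device-private pages are copied back to system RAM before the pagemap is torn down. In outline (error handling elided; this sketches the calls used above, not new API):

	migrate_device_range(src_pfns, start_pfn, npages);	/* 1: collect */
	/* 2: for each MIGRATE_PFN_MIGRATE entry, allocate a system page,
	 *    record it in dst_pfns, and copy the device data back */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);	/* 3: finalize */
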
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 7a2b2d6bc3fe..62f69589a72d 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -729,7 +729,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
static int drm_buddy_init_test(struct kunit *test)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
return 0;
}
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 659d1af4dca7..c4b66eeae203 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -2212,7 +2212,7 @@ err_nodes:
static int drm_mm_init_test(struct kunit *test)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
return 0;
}
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 7850287dfe7a..351c81a929a6 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1379,6 +1379,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
i3c_bus_set_addr_slot_status(&master->bus,
dev->info.dyn_addr,
I3C_ADDR_SLOT_I3C_DEV);
+ if (old_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, old_dyn_addr,
+ I3C_ADDR_SLOT_FREE);
}
if (master->ops->reattach_i3c_dev) {
@@ -1908,10 +1911,6 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
i3c_master_free_i3c_dev(olddev);
}
- ret = i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
- if (ret)
- goto err_detach_dev;
-
/*
* Depending on our previous state, the expected dynamic address might
* differ:
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 70da57ef2eeb..cc2222b85c88 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3807,7 +3807,7 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rover = prandom_u32() % remaining + low;
+ rover = prandom_u32_max(remaining) + low;
retry:
if (last_used_port != rover) {
struct rdma_bind_list *bind_list;
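
Editor's note: the conversions in this and the following hunks replace `prandom_u32() % n` with prandom_u32_max(n), which maps a 32-bit random value into [0, n) with a multiply-and-shift instead of a modulo. A minimal sketch of the converted port-picking logic (the helper name is hypothetical):

	#include <linux/prandom.h>

	static u32 pick_ephemeral_port(u32 low, u32 high)
	{
		u32 remaining = (high - low) + 1;

		return prandom_u32_max(remaining) + low;	/* in [low, high] */
	}
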
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 14392c942f49..499a425a3379 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -734,7 +734,7 @@ static int send_connect(struct c4iw_ep *ep)
&ep->com.remote_addr;
int ret;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
struct net_device *netdev;
u64 params;
@@ -2469,7 +2469,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (!is_t4(adapter_type)) {
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index f64e7e02b129..280d61466855 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
if (obj < alloc->max) {
if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last += prandom_u32() % RANDOM_SKIP;
+ alloc->last += prandom_u32_max(RANDOM_SKIP);
else
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
@@ -85,7 +85,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
- alloc->last = prandom_u32() % RANDOM_SKIP;
+ alloc->last = prandom_u32_max(RANDOM_SKIP);
else
alloc->last = 0;
alloc->max = num;
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 2a7abf7a1f7f..18b05ffb415a 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -850,7 +850,7 @@ void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
int i;
for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
- rcd->flows[i].generation = mask_generation(prandom_u32());
+ rcd->flows[i].generation = mask_generation(get_random_u32());
kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 492b122d0521..480c062dd04f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -41,9 +41,8 @@ static inline u16 get_ah_udp_sport(const struct rdma_ah_attr *ah_attr)
u16 sport;
if (!fl)
- sport = get_random_u32() %
- (IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
- IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
+ sport = prandom_u32_max(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
+ IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
else
sport = rdma_flow_label_to_udp_sport(fl);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d13ecbdd4391..a37cfac5e23f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -96,7 +96,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
- return cpu_to_be64(NODE_GUID_HI | prandom_u32());
+ return cpu_to_be64(NODE_GUID_HI | get_random_u32());
}
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index ebb35b809f26..b610d36295bb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -465,7 +465,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
goto err_qp;
}
- psn = prandom_u32() & 0xffffff;
+ psn = get_random_u32() & 0xffffff;
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
if (ret)
goto err_modify;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 758e1d7ebc36..8546b8816524 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1517,8 +1517,7 @@ static void rtrs_clt_err_recovery_work(struct work_struct *work)
rtrs_clt_stop_and_destroy_conns(clt_path);
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
- prandom_u32() %
- RTRS_RECONNECT_SEED));
+ prandom_u32_max(RTRS_RECONNECT_SEED)));
}
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 00aecd67e348..a7e052c1db53 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -101,6 +101,7 @@ struct pca963x_led {
struct pca963x *chip;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
+ bool blinking;
u8 gdc;
u8 gfrq;
};
@@ -129,12 +130,21 @@ static int pca963x_brightness(struct pca963x_led *led,
switch (brightness) {
case LED_FULL:
- val = (ledout & ~mask) | (PCA963X_LED_ON << shift);
+ if (led->blinking) {
+ val = (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift);
+ ret = i2c_smbus_write_byte_data(client,
+ PCA963X_PWM_BASE +
+ led->led_num,
+ LED_FULL);
+ } else {
+ val = (ledout & ~mask) | (PCA963X_LED_ON << shift);
+ }
ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
break;
case LED_OFF:
val = ledout & ~mask;
ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
+ led->blinking = false;
break;
default:
ret = i2c_smbus_write_byte_data(client,
@@ -144,7 +154,11 @@ static int pca963x_brightness(struct pca963x_led *led,
if (ret < 0)
return ret;
- val = (ledout & ~mask) | (PCA963X_LED_PWM << shift);
+ if (led->blinking)
+ val = (ledout & ~mask) | (PCA963X_LED_GRP_PWM << shift);
+ else
+ val = (ledout & ~mask) | (PCA963X_LED_PWM << shift);
+
ret = i2c_smbus_write_byte_data(client, ledout_addr, val);
break;
}
@@ -181,6 +195,7 @@ static void pca963x_blink(struct pca963x_led *led)
}
mutex_unlock(&led->chip->mutex);
+ led->blinking = true;
}
static int pca963x_power_state(struct pca963x_led *led)
@@ -275,6 +290,8 @@ static int pca963x_blink_set(struct led_classdev *led_cdev,
led->gfrq = gfrq;
pca963x_blink(led);
+ led->led_cdev.brightness = LED_FULL;
+ pca963x_led_set(led_cdev, LED_FULL);
*delay_on = time_on;
*delay_off = time_off;
@@ -337,6 +354,7 @@ static int pca963x_register_leds(struct i2c_client *client,
led->led_cdev.brightness_set_blocking = pca963x_led_set;
if (hw_blink)
led->led_cdev.blink_set = pca963x_blink_set;
+ led->blinking = false;
init_data.fwnode = child;
/* for backwards compatibility */
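
The pca963x change routes full-brightness writes through group PWM while a blink is active. Every mode update uses the same read-modify-write idiom on the shared LEDOUT register: clear this LED's 2-bit field, then OR in the new mode. A condensed sketch (names mirror the patch; treat it as illustration, not the driver itself):

	static int pca963x_set_mode(struct i2c_client *client, u8 ledout_addr,
				    u8 ledout, int shift, u8 mode)
	{
		u8 mask = 0x3 << shift;	/* 2 bits per LED in LEDOUT */
		u8 val = (ledout & ~mask) | (mode << shift);

		return i2c_smbus_write_byte_data(client, ledout_addr, val);
	}
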
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f2c5a7e06fa9..3427555b0cca 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -401,7 +401,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
}
if (bypass_torture_test(dc)) {
- if ((get_random_int() & 3) == 3)
+ if (prandom_u32_max(4) == 3)
goto skip;
else
goto rescale;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 09c7ed2650ca..9c5ef818ca36 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b)
{
BUG_ON(b->hold_count);
- if (!b->state) /* fast case */
+ /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
+ if (!smp_load_acquire(&b->state)) /* fast case */
return;
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
BUG_ON(test_bit(B_DIRTY, &b->state));
if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
- unlikely(test_bit(B_READING, &b->state)))
+ unlikely(test_bit_acquire(B_READING, &b->state)))
continue;
if (!b->hold_count) {
@@ -1058,7 +1059,7 @@ found_buffer:
* If the user called both dm_bufio_prefetch and dm_bufio_get on
* the same buffer, it would deadlock if we waited.
*/
- if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
+ if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state)))
return NULL;
b->hold_count++;
@@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b)
* invalid buffer.
*/
if ((b->read_error || b->write_error) &&
- !test_bit(B_READING, &b->state) &&
+ !test_bit_acquire(B_READING, &b->state) &&
!test_bit(B_WRITING, &b->state) &&
!test_bit(B_DIRTY, &b->state)) {
__unlink_buffer(b);
@@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move);
static void forget_buffer_locked(struct dm_buffer *b)
{
- if (likely(!b->hold_count) && likely(!b->state)) {
+ if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) {
__unlink_buffer(b);
__free_buffer_wake(b);
}
@@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
if (!(gfp & __GFP_FS) ||
(static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
- if (test_bit(B_READING, &b->state) ||
+ if (test_bit_acquire(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
return false;
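
The dm-bufio hunks all strengthen reads of b->state with acquire semantics. A plain test_bit() does not order later loads, so a reader could observe B_READING cleared yet still read stale buffer contents. A comment-only sketch of the pairing the patch relies on:

	/*
	 * writer (read_endio)                 reader
	 * -------------------                 ------------------------------
	 * fill buffer data                    if (!test_bit_acquire(B_READING,
	 * smp_mb__before_atomic();                    &b->state))
	 * clear_bit(B_READING, &b->state);            use buffer data;
	 *
	 * Once the acquire load observes B_READING == 0, the data written
	 * before the writer's barrier is guaranteed visible as well.
	 */
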
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index c05fc3436cef..06eb31af626f 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -166,7 +166,7 @@ struct dm_cache_policy_type {
struct dm_cache_policy_type *real;
/*
- * Policies may store a hint for each each cache block.
+ * Policies may store a hint for each cache block.
* Currently the size of this hint must be 0 or 4 bytes but we
* expect to relax this in future.
*/
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 811b0a5379d0..2f1cc66d2641 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone)
reason = "max discard sectors smaller than a region";
if (reason) {
- DMWARN("Destination device (%pd) %s: Disabling discard passdown.",
+ DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
dest_dev, reason);
clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 98976aaa9db9..6b3f867d0b70 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
hc = __get_name_cell(new);
if (hc) {
- DMWARN("Unable to change %s on mapped device %s to one that "
- "already exists: %s",
- change_uuid ? "uuid" : "name",
- param->name, new);
+ DMERR("Unable to change %s on mapped device %s to one that "
+ "already exists: %s",
+ change_uuid ? "uuid" : "name",
+ param->name, new);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_data);
@@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
*/
hc = __get_name_cell(param->name);
if (!hc) {
- DMWARN("Unable to rename non-existent device, %s to %s%s",
- param->name, change_uuid ? "uuid " : "", new);
+ DMERR("Unable to rename non-existent device, %s to %s%s",
+ param->name, change_uuid ? "uuid " : "", new);
up_write(&_hash_lock);
kfree(new_data);
return ERR_PTR(-ENXIO);
@@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
* Does this device already have a uuid?
*/
if (change_uuid && hc->uuid) {
- DMWARN("Unable to change uuid of mapped device %s to %s "
- "because uuid is already set to %s",
- param->name, new, hc->uuid);
+ DMERR("Unable to change uuid of mapped device %s to %s "
+ "because uuid is already set to %s",
+ param->name, new, hc->uuid);
dm_put(hc->md);
up_write(&_hash_lock);
kfree(new_data);
@@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t
static int check_name(const char *name)
{
if (strchr(name, '/')) {
- DMWARN("invalid device name");
+ DMERR("invalid device name");
return -EINVAL;
}
@@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src
down_read(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
- DMWARN("device has been removed from the dev hash table.");
+ DMERR("device has been removed from the dev hash table.");
goto out;
}
@@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
if (new_data < param->data ||
invalid_str(new_data, (void *) param + param_size) || !*new_data ||
strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
- DMWARN("Invalid new mapped device name or uuid string supplied.");
+ DMERR("Invalid new mapped device name or uuid string supplied.");
return -EINVAL;
}
@@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
if (geostr < param->data ||
invalid_str(geostr, (void *) param + param_size)) {
- DMWARN("Invalid geometry supplied.");
+ DMERR("Invalid geometry supplied.");
goto out;
}
@@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
indata + 1, indata + 2, indata + 3, &dummy);
if (x != 4) {
- DMWARN("Unable to interpret geometry settings.");
+ DMERR("Unable to interpret geometry settings.");
goto out;
}
if (indata[0] > 65535 || indata[1] > 255 ||
indata[2] > 255 || indata[3] > ULONG_MAX) {
- DMWARN("Geometry exceeds range limits.");
+ DMERR("Geometry exceeds range limits.");
goto out;
}
@@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table,
char *target_params;
if (!param->target_count) {
- DMWARN("populate_table: no targets specified");
+ DMERR("populate_table: no targets specified");
return -EINVAL;
}
@@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table,
r = next_target(spec, next, end, &spec, &target_params);
if (r) {
- DMWARN("unable to find target");
+ DMERR("unable to find target");
return r;
}
@@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table,
(sector_t) spec->length,
target_params);
if (r) {
- DMWARN("error adding target to table");
+ DMERR("error adding target to table");
return r;
}
@@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
if (immutable_target_type &&
(immutable_target_type != dm_table_get_immutable_target_type(t)) &&
!dm_table_get_wildcard_target(t)) {
- DMWARN("can't replace immutable target type %s",
- immutable_target_type->name);
+ DMERR("can't replace immutable target type %s",
+ immutable_target_type->name);
r = -EINVAL;
goto err_unlock_md_type;
}
@@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t);
if (r) {
- DMWARN("unable to set up device queue for new table.");
+ DMERR("unable to set up device queue for new table.");
goto err_unlock_md_type;
}
} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
- DMWARN("can't change device type (old=%u vs new=%u) after initial table load.",
- dm_get_md_type(md), dm_table_get_type(t));
+ DMERR("can't change device type (old=%u vs new=%u) after initial table load.",
+ dm_get_md_type(md), dm_table_get_type(t));
r = -EINVAL;
goto err_unlock_md_type;
}
@@ -1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
- DMWARN("device has been removed from the dev hash table.");
+ DMERR("device has been removed from the dev hash table.");
up_write(&_hash_lock);
r = -ENXIO;
goto err_destroy_table;
@@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
if (tmsg < (struct dm_target_msg *) param->data ||
invalid_str(tmsg->message, (void *) param + param_size)) {
- DMWARN("Invalid target message parameters.");
+ DMERR("Invalid target message parameters.");
r = -EINVAL;
goto out;
}
r = dm_split_args(&argc, &argv, tmsg->message);
if (r) {
- DMWARN("Failed to split target message parameters");
+ DMERR("Failed to split target message parameters");
goto out;
}
if (!argc) {
- DMWARN("Empty message received.");
+ DMERR("Empty message received.");
r = -EINVAL;
goto out_argv;
}
@@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
ti = dm_table_find_target(table, tmsg->sector);
if (!ti) {
- DMWARN("Target message sector outside device.");
+ DMERR("Target message sector outside device.");
r = -EINVAL;
} else if (ti->type->message)
r = ti->type->message(ti, argc, argv, result, maxlen);
else {
- DMWARN("Target type does not support messages");
+ DMERR("Target type does not support messages");
r = -EINVAL;
}
@@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
if ((DM_VERSION_MAJOR != version[0]) ||
(DM_VERSION_MINOR < version[1])) {
- DMWARN("ioctl interface mismatch: "
- "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
- DM_VERSION_MAJOR, DM_VERSION_MINOR,
- DM_VERSION_PATCHLEVEL,
- version[0], version[1], version[2], cmd);
+ DMERR("ioctl interface mismatch: "
+ "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+ DM_VERSION_MAJOR, DM_VERSION_MINOR,
+ DM_VERSION_PATCHLEVEL,
+ version[0], version[1], version[2], cmd);
r = -EINVAL;
}
@@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
if (cmd == DM_DEV_CREATE_CMD) {
if (!*param->name) {
- DMWARN("name not supplied when creating device");
+ DMERR("name not supplied when creating device");
return -EINVAL;
}
} else if (*param->uuid && *param->name) {
- DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
+ DMERR("only supply one of name or uuid, cmd(%u)", cmd);
return -EINVAL;
}
@@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
fn = lookup_ioctl(cmd, &ioctl_flags);
if (!fn) {
- DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
+ DMERR("dm_ctl_ioctl: unknown command 0x%x", command);
return -ENOTTY;
}
@@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
(sector_t) spec_array[i]->length,
target_params_array[i]);
if (r) {
- DMWARN("error adding target to table");
+ DMERR("error adding target to table");
goto err_destroy_table;
}
}
@@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
/* setup md->queue to reflect md's type (may block) */
r = dm_setup_md_queue(md, t);
if (r) {
- DMWARN("unable to set up device queue for new table.");
+ DMERR("unable to set up device queue for new table.");
goto err_destroy_table;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c640be453313..54263679a7b1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
* of the "sync" directive.
*
* With reshaping capability added, we must ensure that
- * that the "sync" directive is disallowed during the reshape.
+ * the "sync" directive is disallowed during the reshape.
*/
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
@@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
/*
* Adjust data_offset and new_data_offset on all disk members of @rs
- * for out of place reshaping if requested by contructor
+ * for out of place reshaping if requested by constructor
*
* We need free space at the beginning of each raid disk for forward
* and at the end for backward reshapes which userspace has to provide
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3001b10a3fbf..a41209a43506 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
dm_requeue_original_request(tio, true);
break;
default:
- DMWARN("unimplemented target endio return value: %d", r);
+ DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
@@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio)
dm_kill_unmapped_request(rq, BLK_STS_IOERR);
break;
default:
- DMWARN("unimplemented target map return value: %d", r);
+ DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8326f9fe0e91..f105a71915ab 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
return 2; /* this wasn't a stats message */
if (r == -EINVAL)
- DMWARN("Invalid parameters for message %s", argv[0]);
+ DMCRIT("Invalid parameters for message %s", argv[0]);
return r;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d8034ff0cb24..078da18bb86d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if ((start >= dev_size) || (start + len > dev_size)) {
- DMWARN("%s: %pg too small for target: "
- "start=%llu, len=%llu, dev_size=%llu",
- dm_device_name(ti->table->md), bdev,
- (unsigned long long)start,
- (unsigned long long)len,
- (unsigned long long)dev_size);
+ DMERR("%s: %pg too small for target: "
+ "start=%llu, len=%llu, dev_size=%llu",
+ dm_device_name(ti->table->md), bdev,
+ (unsigned long long)start,
+ (unsigned long long)len,
+ (unsigned long long)dev_size);
return 1;
}
@@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
unsigned int zone_sectors = bdev_zone_sectors(bdev);
if (start & (zone_sectors - 1)) {
- DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)start,
- zone_sectors, bdev);
+ DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ zone_sectors, bdev);
return 1;
}
@@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
* the sector range.
*/
if (len & (zone_sectors - 1)) {
- DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)len,
- zone_sectors, bdev);
+ DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)len,
+ zone_sectors, bdev);
return 1;
}
}
@@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if (start & (logical_block_size_sectors - 1)) {
- DMWARN("%s: start=%llu not aligned to h/w "
- "logical block size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)start,
- limits->logical_block_size, bdev);
+ DMERR("%s: start=%llu not aligned to h/w "
+ "logical block size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ limits->logical_block_size, bdev);
return 1;
}
if (len & (logical_block_size_sectors - 1)) {
- DMWARN("%s: len=%llu not aligned to h/w "
- "logical block size %u of %pg",
- dm_device_name(ti->table->md),
- (unsigned long long)len,
- limits->logical_block_size, bdev);
+ DMERR("%s: len=%llu not aligned to h/w "
+ "logical block size %u of %pg",
+ dm_device_name(ti->table->md),
+ (unsigned long long)len,
+ limits->logical_block_size, bdev);
return 1;
}
@@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
}
}
if (!found) {
- DMWARN("%s: device %s not in table devices list",
- dm_device_name(ti->table->md), d->name);
+ DMERR("%s: device %s not in table devices list",
+ dm_device_name(ti->table->md), d->name);
return;
}
if (refcount_dec_and_test(&dd->count)) {
@@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t,
}
if (remaining) {
- DMWARN("%s: table line %u (start sect %llu len %llu) "
- "not aligned to h/w logical block size %u",
- dm_device_name(t->md), i,
- (unsigned long long) ti->begin,
- (unsigned long long) ti->len,
- limits->logical_block_size);
+ DMERR("%s: table line %u (start sect %llu len %llu) "
+ "not aligned to h/w logical block size %u",
+ dm_device_name(t->md), i,
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len,
+ limits->logical_block_size);
return -EINVAL;
}
@@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
struct dm_md_mempools *pools;
if (unlikely(type == DM_TYPE_NONE)) {
- DMWARN("no table type is set, can't allocate mempools");
+ DMERR("no table type is set, can't allocate mempools");
return -EINVAL;
}
@@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk)
* Get a disk whose integrity profile reflects the table's profile.
* Returns NULL if integrity support was inconsistent or unavailable.
*/
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
+static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
struct list_head *devices = dm_table_get_devices(t);
struct dm_dev_internal *dd = NULL;
@@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t)
* profile the new profile should not conflict.
*/
if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
- DMWARN("%s: conflict with existing integrity profile: "
- "%s profile mismatch",
- dm_device_name(t->md),
- template_disk->disk_name);
+ DMERR("%s: conflict with existing integrity profile: "
+ "%s profile mismatch",
+ dm_device_name(t->md),
+ template_disk->disk_name);
return 1;
}
@@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
if (t->md->queue &&
!blk_crypto_has_capabilities(profile,
t->md->queue->crypto_profile)) {
- DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
+ DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
dm_destroy_crypto_profile(profile);
return -EINVAL;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 8a00cc42e498..ccf5b852fbf7 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
/* WQ_UNBOUND greatly improves performance when running on ramdisk */
wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
- if (v->use_tasklet) {
- /*
- * Allow verify_wq to preempt softirq since verification in
- * tasklet will fall-back to using it for error handling
- * (or if the bufio cache doesn't have required hashes).
- */
- wq_flags |= WQ_HIGHPRI;
- }
+ /*
+ * Using WQ_HIGHPRI improves throughput and completion latency by
+ * reducing wait times when reading from a dm-verity device.
+ *
+ * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
+ * allows verify_wq to preempt softirq since verification in tasklet
+ * will fall back to using it for error handling (or if the bufio cache
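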
+ * doesn't have required hashes).
+ */
+ wq_flags |= WQ_HIGHPRI;
v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 60549b65c799..95a1ee3d314e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
if (geo->start > sz) {
- DMWARN("Start sector is beyond the geometry limits.");
+ DMERR("Start sector is beyond the geometry limits.");
return -EINVAL;
}
@@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio)
/* The target will handle the io */
return;
default:
- DMWARN("unimplemented target endio return value: %d", r);
+ DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
@@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone)
dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
- DMWARN("unimplemented target map return value: %d", r);
+ DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
}
@@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor)
md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
- DMWARN("unable to allocate device, out of memory.");
+ DMERR("unable to allocate device, out of memory.");
return NULL;
}
@@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->minors = 1;
md->disk->flags |= GENHD_FL_NO_PART;
md->disk->fops = &dm_blk_dops;
- md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 79c73330020b..832d8566e165 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -2994,7 +2994,7 @@ static int r5l_load_log(struct r5l_log *log)
}
create:
if (create_super) {
- log->last_cp_seq = prandom_u32();
+ log->last_cp_seq = get_random_u32();
cp = 0;
r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
/*
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 9b7bcdce6e44..303d02b1d71c 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -870,7 +870,7 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g = tpg_colors[col].g;
b = tpg_colors[col].b;
} else if (tpg->pattern == TPG_PAT_NOISE) {
- r = g = b = prandom_u32_max(256);
+ r = g = b = get_random_u8();
} else if (k == TPG_COLOR_RANDOM) {
r = g = b = tpg->qual_offset + prandom_u32_max(196);
} else if (k >= TPG_COLOR_RAMP) {
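
Here prandom_u32_max(256) becomes get_random_u8(): both yield a uniformly distributed value in 0..255, and the dedicated helper consumes a single byte of batched CRNG output. A two-line kernel-context fragment for comparison:

	u8 old_way = prandom_u32_max(256);	/* uniform 0..255 */
	u8 new_way = get_random_u8();		/* same distribution */
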
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-rx.c b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
index 232cab508f48..8bd09589fb15 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-rx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
@@ -104,8 +104,8 @@ retry:
break;
case 2:
rds.block |= V4L2_RDS_BLOCK_ERROR;
- rds.lsb = prandom_u32_max(256);
- rds.msb = prandom_u32_max(256);
+ rds.lsb = get_random_u8();
+ rds.msb = get_random_u8();
break;
case 3: /* Skip block altogether */
if (i)
diff --git a/drivers/media/test-drivers/vivid/vivid-touch-cap.c b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
index 64e3e4cb30c2..6cc32eb54f9d 100644
--- a/drivers/media/test-drivers/vivid/vivid-touch-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-touch-cap.c
@@ -210,7 +210,7 @@ static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
/* Fill 10% of the values within range -3 and 3, zero the others */
for (i = 0; i < size; i++) {
- unsigned int rand = get_random_int();
+ unsigned int rand = get_random_u32();
if (rand % 10)
tch_buf[i] = 0;
@@ -221,7 +221,7 @@ static void vivid_fill_buff_noise(__s16 *tch_buf, int size)
static inline int get_random_pressure(void)
{
- return get_random_int() % VIVID_PRESSURE_LIMIT;
+ return prandom_u32_max(VIVID_PRESSURE_LIMIT);
}
static void vivid_tch_buf_set(struct v4l2_pix_format *f,
@@ -272,7 +272,7 @@ void vivid_fillbuff_tch(struct vivid_dev *dev, struct vivid_buffer *buf)
return;
if (test_pat_idx == 0)
- dev->tch_pat_random = get_random_int();
+ dev->tch_pat_random = get_random_u32();
rand = dev->tch_pat_random;
switch (test_pattern) {
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/misc/habanalabs/gaudi2/gaudi2.c
index 75c4bef7841c..65e6cae6100a 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2.c
@@ -2948,7 +2948,7 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
static inline int gaudi2_get_non_zero_random_int(void)
{
- int rand = get_random_int();
+ int rand = get_random_u32();
return rand ? rand : 1;
}
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ce89611a136e..54cd009aee50 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1140,8 +1140,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
+ unsigned int arg = card->erase_arg;
- mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg);
+ if (mmc_card_broken_sd_discard(card))
+ arg = SD_ERASE_ARG;
+
+ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 99045e138ba4..cfdd1ff40b86 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -73,6 +73,7 @@ struct mmc_fixup {
#define EXT_CSD_REV_ANY (-1u)
#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_SANDISK_SD 0x3
#define CID_MANFID_ATP 0x9
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
@@ -258,4 +259,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_HPI;
}
+static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
+}
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ef53a2578824..95fa8fb1d45f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -97,8 +97,8 @@ static void mmc_should_fail_request(struct mmc_host *host,
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
- data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
- data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+ data->error = data_errors[prandom_u32_max(ARRAY_SIZE(data_errors))];
+ data->bytes_xfered = prandom_u32_max(data->bytes_xfered >> 9) << 9;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index be4393988086..29b9497936df 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -100,6 +100,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /*
+	 * Some SD cards report discard support while they don't
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_DISCARD),
+
END_FIXUP
};
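
For reference, the new fixup matches SanDisk SD cards by CID: manfid 0x3 plus OEM id 0x5344, which is ASCII "SD". When it matches, add_quirk_sd() sets MMC_QUIRK_BROKEN_SD_DISCARD in card->quirks, and the discard path (block.c hunk above) consumes it roughly as in this condensed sketch:

	unsigned int arg = card->erase_arg;

	if (mmc_card_broken_sd_discard(card))	/* quirk bit set via MMC_FIXUP */
		arg = SD_ERASE_ARG;		/* fall back to plain SD erase */
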
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 581614196a84..c78bbc22e0d1 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1858,7 +1858,7 @@ static void dw_mci_start_fault_timer(struct dw_mci *host)
* Try to inject the error at random points during the data transfer.
*/
hrtimer_start(&host->fault_timer,
- ms_to_ktime(prandom_u32() % 25),
+ ms_to_ktime(prandom_u32_max(25)),
HRTIMER_MODE_REL);
}
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 6edbf5c161ab..b970699743e0 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -128,6 +128,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
struct clk *ref_clk = priv->clk;
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
unsigned int new_clock, clkh_shift = 0;
+ unsigned int new_upper_limit;
int i;
/*
@@ -153,13 +154,20 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
* greater than, new_clock. As we can divide by 1 << i for
* any i in [0, 9] we want the input clock to be as close as
* possible, but no greater than, new_clock << i.
+ *
+	 * Allow the clock rate to exceed the request by up to 1/1024 to fix
+	 * the clk rate dropping to a lower source due to rounding error (eg:
+	 * RZ/G2L has 3 clk sources, 533.333333 MHz, 400 MHz and 266.666666 MHz.
+	 * A request for 533.333333 MHz would select the slower 400 MHz due to
+	 * rounding error (533333333 Hz / 4 * 4 = 533333332 Hz < 533333333 Hz)).
*/
for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
freq = clk_round_rate(ref_clk, new_clock << i);
- if (freq > (new_clock << i)) {
+ new_upper_limit = (new_clock << i) + ((new_clock << i) >> 10);
+ if (freq > new_upper_limit) {
/* Too fast; look for a slightly slower option */
freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3);
- if (freq > (new_clock << i))
+ if (freq > new_upper_limit)
continue;
}
@@ -181,6 +189,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
unsigned int new_clock)
{
+ unsigned int clk_margin;
u32 clk = 0, clock;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -194,7 +203,13 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock);
clock = host->mmc->actual_clock / 512;
- for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+	/*
+	 * Allow a margin of 1/1024 above the requested clock rate so that
+	 * the clk variable is not set to 0 because of the margin already
+	 * granted to actual_clock in renesas_sdhi_clk_update().
+	 */
+ clk_margin = new_clock >> 10;
+ for (clk = 0x80000080; new_clock + clk_margin >= (clock << 1); clk >>= 1)
clock <<= 1;
/* 1/1 clock is option */
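
Worked numbers for the 1/1024 margin, using the example from the comment above: a request for 533333333 Hz may round to 533333332 Hz, one Hz short of the request.

	new_upper_limit = 533333333 + (533333333 >> 10)
	                = 533333333 + 520833
	                = 533854166 Hz

	533333332 Hz <= 533854166 Hz

so the near-exact 533.33 MHz source is accepted instead of falling back to the much slower 400 MHz one.
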
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 46c55ab4884c..b92a408f138d 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -309,7 +309,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host)
static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host)
{
- return 400000;
+ return 100000;
}
static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 2d2d8260c681..413925bce0ca 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -773,7 +773,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
dev_err(dev, "failed to set clk rate to %luHz: %d\n",
host_clk, err);
- tegra_host->curr_clk_rate = host_clk;
+ tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
else
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 24beade95c7f..672719023241 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -1393,7 +1393,7 @@ static int ns_do_read_error(struct nandsim *ns, int num)
unsigned int page_no = ns->regs.row;
if (ns_read_error(page_no)) {
- prandom_bytes(ns->buf.byte, num);
+ get_random_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
}
@@ -1402,12 +1402,12 @@ static int ns_do_read_error(struct nandsim *ns, int num)
static void ns_do_bit_flips(struct nandsim *ns, int num)
{
- if (bitflips && prandom_u32() < (1 << 22)) {
+ if (bitflips && get_random_u16() < (1 << 6)) {
int flips = 1;
if (bitflips > 1)
- flips = (prandom_u32() % (int) bitflips) + 1;
+ flips = prandom_u32_max(bitflips) + 1;
while (flips--) {
- int pos = prandom_u32() % (num * 8);
+ int pos = prandom_u32_max(num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index c4f271314f52..440988562cfd 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -47,7 +47,7 @@ struct nand_ecc_test {
static void single_bit_error_data(void *error_data, void *correct_data,
size_t size)
{
- unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32_max(size * BITS_PER_BYTE);
memcpy(error_data, correct_data, size);
__change_bit_le(offset, error_data);
@@ -58,9 +58,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
{
unsigned int offset[2];
- offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
+ offset[0] = prandom_u32_max(size * BITS_PER_BYTE);
do {
- offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
+ offset[1] = prandom_u32_max(size * BITS_PER_BYTE);
} while (offset[0] == offset[1]);
memcpy(error_data, correct_data, size);
@@ -71,7 +71,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
static unsigned int random_ecc_bit(size_t size)
{
- unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32_max(3 * BITS_PER_BYTE);
if (size == 256) {
/*
@@ -79,7 +79,7 @@ static unsigned int random_ecc_bit(size_t size)
* and 17th bit) in ECC code for 256 byte data block
*/
while (offset == 16 || offset == 17)
- offset = prandom_u32() % (3 * BITS_PER_BYTE);
+ offset = prandom_u32_max(3 * BITS_PER_BYTE);
}
return offset;
@@ -266,7 +266,7 @@ static int nand_ecc_test_run(const size_t size)
goto error;
}
- prandom_bytes(correct_data, size);
+ get_random_bytes(correct_data, size);
ecc_sw_hamming_calculate(correct_data, size, correct_ecc, sm_order);
for (i = 0; i < ARRAY_SIZE(nand_ecc_test); i++) {
nand_ecc_test[i].prepare(error_data, error_ecc,
diff --git a/drivers/mtd/tests/speedtest.c b/drivers/mtd/tests/speedtest.c
index c9ec7086bfa1..075bce32caa5 100644
--- a/drivers/mtd/tests/speedtest.c
+++ b/drivers/mtd/tests/speedtest.c
@@ -223,7 +223,7 @@ static int __init mtd_speedtest_init(void)
if (!iobuf)
goto out;
- prandom_bytes(iobuf, mtd->erasesize);
+ get_random_bytes(iobuf, mtd->erasesize);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
diff --git a/drivers/mtd/tests/stresstest.c b/drivers/mtd/tests/stresstest.c
index cb29c8c1b370..75b6ddc5dc4d 100644
--- a/drivers/mtd/tests/stresstest.c
+++ b/drivers/mtd/tests/stresstest.c
@@ -45,9 +45,8 @@ static int rand_eb(void)
unsigned int eb;
again:
- eb = prandom_u32();
/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
- eb %= (ebcnt - 1);
+ eb = prandom_u32_max(ebcnt - 1);
if (bbt[eb])
goto again;
return eb;
@@ -55,20 +54,12 @@ again:
static int rand_offs(void)
{
- unsigned int offs;
-
- offs = prandom_u32();
- offs %= bufsize;
- return offs;
+ return prandom_u32_max(bufsize);
}
static int rand_len(int offs)
{
- unsigned int len;
-
- len = prandom_u32();
- len %= (bufsize - offs);
- return len;
+ return prandom_u32_max(bufsize - offs);
}
static int do_read(void)
@@ -127,7 +118,7 @@ static int do_write(void)
static int do_operation(void)
{
- if (prandom_u32() & 1)
+ if (prandom_u32_max(2))
return do_read();
else
return do_write();
@@ -192,7 +183,7 @@ static int __init mtd_stresstest_init(void)
goto out;
for (i = 0; i < ebcnt; i++)
offsets[i] = mtd->erasesize;
- prandom_bytes(writebuf, bufsize);
+ get_random_bytes(writebuf, bufsize);
bbt = kzalloc(ebcnt, GFP_KERNEL);
if (!bbt)
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 4cf67a2a0d04..75eaecc8639f 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -409,7 +409,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
ret = blk_mq_alloc_tag_set(&dev->tag_set);
if (ret) {
dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
- goto out_free_dev;;
+ goto out_free_dev;
}
@@ -441,7 +441,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
/*
* Create one workqueue per volume (per registered block device).
- * Rembember workqueues are cheap, they're not threads.
+ * Remember workqueues are cheap, they're not threads.
*/
dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
if (!dev->wq) {
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a32050fecabf..a901f8edfa41 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -807,6 +807,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ * @disable_fm: whether to disable fastmap
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -814,11 +815,15 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* automatically. Returns the new UBI device number in case of success and a
* negative error code in case of failure.
*
+ * If @disable_fm is true, UBI doesn't create a new fastmap even if the module
+ * param 'fm_autoconvert' is set, and any existing old fastmap is destroyed
+ * after a full scan.
+ *
 * Note, invocations of this function have to be serialized by the
* @ubi_devices_mutex.
*/
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024)
+ int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
{
struct ubi_device *ubi;
int i, err;
@@ -921,7 +926,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
UBI_FM_MIN_POOL_SIZE);
ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
- ubi->fm_disabled = !fm_autoconvert;
+ ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
if (fm_debug)
ubi_enable_dbg_chk_fastmap(ubi);
@@ -962,7 +967,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->fm_buf)
goto out_free;
#endif
- err = ubi_attach(ubi, 0);
+ err = ubi_attach(ubi, disable_fm ? 1 : 0);
if (err) {
ubi_err(ubi, "failed to attach mtd%d, error %d",
mtd->index, err);
@@ -1242,7 +1247,8 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, p->ubi_num,
- p->vid_hdr_offs, p->max_beb_per1024);
+ p->vid_hdr_offs, p->max_beb_per1024,
+ false);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
pr_err("UBI error: cannot attach mtd%d\n",
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index cc9a28cf9d82..f43430b9c1e6 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -672,7 +672,7 @@ static int verify_rsvol_req(const struct ubi_device *ubi,
* @req: volumes re-name request
*
* This is a helper function for the volume re-name IOCTL which validates the
- * the request, opens the volume and calls corresponding volumes management
+ * request, opens the volume and calls corresponding volumes management
* function. Returns zero in case of success and a negative error code in case
* of failure.
*/
@@ -1041,7 +1041,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
*/
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
- req.max_beb_per1024);
+ req.max_beb_per1024, !!req.disable_fm);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 31d427ee191a..908d0e088557 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -590,7 +590,7 @@ int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
- ubi->dbg.power_cut_counter += prandom_u32() % range;
+ ubi->dbg.power_cut_counter += prandom_u32_max(range);
}
return 0;
}
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 118248a5d7d4..dc8d8f83657a 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -73,7 +73,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_bitflips)
- return !(prandom_u32() % 200);
+ return !prandom_u32_max(200);
return 0;
}
@@ -87,7 +87,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(prandom_u32() % 500);
+ return !prandom_u32_max(500);
return 0;
}
@@ -101,7 +101,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(prandom_u32() % 400);
+ return !prandom_u32_max(400);
return 0;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index ccc5979642b7..09c408c45a62 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -377,7 +377,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
*
* This function locks a logical eraseblock for writing if there is no
* contention and does nothing if there is contention. Returns %0 in case of
- * success, %1 in case of contention, and and a negative error code in case of
+ * success, %1 in case of contention, and a negative error code in case of
* failure.
*/
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 6e95c4b1473e..ca2d9efe62c3 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -20,8 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
- ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
- GFP_KERNEL);
+ ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -34,7 +33,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
*/
static inline void free_seen(unsigned long *seen)
{
- kfree(seen);
+ bitmap_free(seen);
}
/**
@@ -1108,8 +1107,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
if (!ubi->fast_attach)
return 0;
- vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
- GFP_KERNEL);
+ vol->checkmap = bitmap_zalloc(leb_count, GFP_KERNEL);
if (!vol->checkmap)
return -ENOMEM;
@@ -1118,7 +1116,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
- kfree(vol->checkmap);
+ bitmap_free(vol->checkmap);
}
/**
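
The fastmap hunks swap kcalloc(BITS_TO_LONGS(n), sizeof(unsigned long), ...) for bitmap_zalloc(), which sizes and zeroes the bitmap itself and pairs with bitmap_free(). A minimal sketch of the pattern (nbits stands in for peb_count/leb_count):

	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;
	/* ... set_bit()/test_bit() against map ... */
	bitmap_free(map);
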
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 8a7306cc1947..01b644861253 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1147,7 +1147,7 @@ fail:
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
- * This function returns zero if the erase counter header is all right and and
+ * This function returns zero if the erase counter header is all right and
* a negative error code if not or if an error occurred.
*/
static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 386db0598e95..2c9cd3b6434f 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -131,7 +131,7 @@ enum {
* is changed radically. This field is duplicated in the volume identifier
* header.
*
- * The @vid_hdr_offset and @data_offset fields contain the offset of the the
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
* volume identifier header and user data, relative to the beginning of the
* physical eraseblock. These values have to be the same for all physical
* eraseblocks.
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 078112e23dfd..c8f1bd4fa100 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -86,7 +86,7 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
* Error codes returned by the I/O sub-system.
*
* UBI_IO_FF: the read region of flash contains only 0xFFs
- * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also also there was a data
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
* integrity error reported by the MTD driver
* (uncorrectable ECC error in case of NAND)
* UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
@@ -281,7 +281,7 @@ struct ubi_eba_leb_desc {
/**
* struct ubi_volume - UBI volume description data structure.
- * @dev: device object to make use of the the Linux device model
+ * @dev: device object to make use of the Linux device model
* @cdev: character device object to create character device
* @ubi: reference to the UBI device description object
* @vol_id: volume ID
@@ -439,7 +439,7 @@ struct ubi_debug_info {
/**
* struct ubi_device - UBI device description structure
- * @dev: UBI device object to use the the Linux device model
+ * @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
* @ubi_num: UBI device number
* @ubi_name: UBI device name
@@ -937,7 +937,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
/* build.c */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
- int vid_hdr_offset, int max_beb_per1024);
+ int vid_hdr_offset, int max_beb_per1024,
+ bool disable_fm);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 6ea95ade4ca6..8fcc0bdf0635 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -623,7 +623,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
* @ubi: UBI device description object
* @vol_id: volume ID
*
- * Returns zero if volume is all right and a a negative error code if not.
+ * Returns zero if volume is all right and a negative error code if not.
*/
static int self_check_volume(struct ubi_device *ubi, int vol_id)
{
@@ -776,7 +776,7 @@ fail:
* self_check_volumes - check information about all volumes.
* @ubi: UBI device description object
*
- * Returns zero if volumes are all right and a a negative error code if not.
+ * Returns zero if volumes are all right and a negative error code if not.
*/
static int self_check_volumes(struct ubi_device *ubi)
{
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 55bae06cf408..68eb0f21b3fe 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -376,7 +376,7 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
* refill_wl_user_pool().
* @ubi: UBI device description object
*
- * This function returns a a wear leveling entry in case of success and
+ * This function returns a wear leveling entry in case of success and
* NULL in case of failure.
*/
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
@@ -429,7 +429,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
/**
* sync_erase - synchronously erase a physical eraseblock.
* @ubi: UBI device description object
- * @e: the the physical eraseblock to erase
+ * @e: the physical eraseblock to erase
* @torture: if the physical eraseblock has to be tortured
*
* This function returns zero in case of success and a negative error code in
@@ -1016,7 +1016,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
/*
* If the ubi->scrub tree is not empty, scrubbing is needed, and the
- * the WL worker has to be scheduled anyway.
+ * WL worker has to be scheduled anyway.
*/
if (!ubi->scrub.rb_node) {
#ifdef CONFIG_MTD_UBI_FASTMAP
@@ -1464,7 +1464,7 @@ static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
* ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to schedule
- * @force: dont't read the block, assume bitflips happened and take action.
+ * @force: don't read the block, assume bitflips happened and take action.
*
 * This function reads the given eraseblock and checks if bitflips occurred.
* In case of bitflips, the eraseblock is scheduled for scrubbing.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 24bb50dfd362..e84c49bf4d0c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4806,7 +4806,7 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
switch (packets_per_slave) {
case 0:
- slave_id = prandom_u32();
+ slave_id = get_random_u32();
break;
case 1:
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 5669c92c93f7..c5c3b4e92f28 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -137,27 +137,42 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
struct qca8k_mgmt_eth_data *mgmt_eth_data;
struct qca8k_priv *priv = ds->priv;
struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 command;
u8 len, cmd;
+ int i;
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
mgmt_eth_data = &priv->mgmt_eth_data;
- cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
- len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
+ command = get_unaligned_le32(&mgmt_ethhdr->command);
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
/* Make sure the seq match the requested packet */
- if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
+ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
mgmt_eth_data->ack = true;
if (cmd == MDIO_READ) {
- mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
+ u32 *val = mgmt_eth_data->data;
+
+ *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
/* Get the rest of the 12 bytes of data.
* The read/write function will extract the requested data.
*/
- if (len > QCA_HDR_MGMT_DATA1_LEN)
- memcpy(mgmt_eth_data->data + 1, skb->data,
- QCA_HDR_MGMT_DATA2_LEN);
+ if (len > QCA_HDR_MGMT_DATA1_LEN) {
+ __le32 *data2 = (__le32 *)skb->data;
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ *val = get_unaligned_le32(data2);
+ val++;
+ data2++;
+ }
+ }
}
complete(&mgmt_eth_data->rw_done);
@@ -169,8 +184,10 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
struct qca_mgmt_ethhdr *mgmt_ethhdr;
unsigned int real_len;
struct sk_buff *skb;
- u32 *data2;
+ __le32 *data2;
+ u32 command;
u16 hdr;
+ int i;
skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
if (!skb)
@@ -199,20 +216,32 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
- mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
- mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
QCA_HDR_MGMT_CHECK_CODE_VAL);
+ put_unaligned_le32(command, &mgmt_ethhdr->command);
+
if (cmd == MDIO_WRITE)
- mgmt_ethhdr->mdio_data = *val;
+ put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
mgmt_ethhdr->hdr = htons(hdr);
data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
- if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
- memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ put_unaligned_le32(*val, data2);
+ data2++;
+ val++;
+ }
+ }
return skb;
}
@@ -220,9 +249,11 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
{
struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 seq;
+ seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
- mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+ put_unaligned_le32(seq, &mgmt_ethhdr->seq);
}
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
@@ -1487,9 +1518,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
struct qca8k_priv *priv = ds->priv;
const struct qca8k_mib_desc *mib;
struct mib_ethhdr *mib_ethhdr;
- int i, mib_len, offset = 0;
- u64 *data;
+ __le32 *data2;
u8 port;
+ int i;
mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
mib_eth_data = &priv->mib_eth_data;
@@ -1501,28 +1532,24 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk
if (port != mib_eth_data->req_port)
goto exit;
- data = mib_eth_data->data;
+ data2 = (__le32 *)skb->data;
for (i = 0; i < priv->info->mib_count; i++) {
mib = &ar8327_mib[i];
/* First 3 mib are present in the skb head */
if (i < 3) {
- data[i] = mib_ethhdr->data[i];
+ mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
continue;
}
- mib_len = sizeof(uint32_t);
-
/* Some mib are 64 bit wide */
if (mib->size == 2)
- mib_len = sizeof(uint64_t);
-
- /* Copy the mib value from packet to the */
- memcpy(data + i, skb->data + offset, mib_len);
+ mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
+ else
+ mib_eth_data->data[i] = get_unaligned_le32(data2);
- /* Set the offset for the next mib */
- offset += mib_len;
+ data2 += mib->size;
}
exit:
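
All of the qca8k hunks replace direct struct loads/stores of header fields with get_unaligned_le32()/put_unaligned_le32() (and an le64 variant for wide MIB counters), since the management header sits in packet data with no alignment or host-endianness guarantee. A condensed kernel-context sketch of the accessor pattern:

	#include <asm/unaligned.h>

	u32 command = get_unaligned_le32(&mgmt_ethhdr->command);  /* read  */
	put_unaligned_le32(seq, &mgmt_ethhdr->seq);               /* write */
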
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index eed98c10ca9d..04cf7684f1b0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3874,7 +3874,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
if (bp->vnic_info[i].rss_hash_key) {
if (i == 0)
- prandom_bytes(vnic->rss_hash_key,
+ get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
else
memcpy(vnic->rss_hash_key,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index a36803e79e92..8a6f788f6294 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -613,6 +613,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
{
+ bool rc = false;
u32 datalen;
u16 index;
u8 *buf;
@@ -632,20 +633,20 @@ static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
- goto err;
+ goto done;
}
if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
- goto err;
+ goto done;
}
- return true;
+ rc = true;
-err:
+done:
kfree(buf);
- return false;
+ return rc;
}
static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
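
The bnxt_nvm_test() rework above is the single-exit idiom: a boolean result initialized to failure, every error path jumping to one label, and the buffer freed exactly once. A sketch of the shape, with hypothetical helpers standing in for the NVRAM calls:

#include <linux/slab.h>

static int read_item(u8 *buf, size_t len) { return 0; }  /* hypothetical */
static int write_item(u8 *buf, size_t len) { return 0; } /* hypothetical */

static bool nvm_selftest(size_t datalen)
{
	bool rc = false;
	u8 *buf = kzalloc(datalen, GFP_KERNEL);

	if (!buf)
		return false;

	if (read_item(buf, datalen))
		goto done;
	if (write_item(buf, datalen))
		goto done;

	rc = true;
done:
	kfree(buf);	/* runs on success and failure alike */
	return rc;
}
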
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index e86503d97f32..2198e35d9e18 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4105,8 +4105,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
atomic_set(&cp->csk_tbl[i].ref_count, 0);
- port_id = prandom_u32();
- port_id %= CNIC_LOCAL_PORT_RANGE;
+ port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE);
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN, port_id)) {
cnic_cm_free_mem(dev);
@@ -4165,7 +4164,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
- seed = prandom_u32();
+ seed = get_random_u32();
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
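
Several hunks in this merge (cnic, chtls, at803x, brcmfmac, iwlwifi) convert `prandom_u32() % N` into prandom_u32_max(N), which maps a 32-bit sample into [0, N) with a multiply-shift instead of a division. A sketch of the bounded-port pattern from the cnic hunk, with hypothetical names:

#include <linux/prandom.h>

/* pick a starting port in [min, min + range) without a modulo */
static u32 pick_local_port(u32 min, u32 range)
{
	return min + prandom_u32_max(range);
}
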
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index f90bfba4b303..c2e7037c7ba1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1063,7 +1063,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
rpl5->opt0 = cpu_to_be64(opt0);
rpl5->opt2 = cpu_to_be32(opt2);
- rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
@@ -1466,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
- inet_sk(sk)->inet_id = prandom_u32();
+ inet_sk(sk)->inet_id = get_random_u16();
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 539992dad8ba..a4256087ac82 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -919,8 +919,8 @@ static int csk_wait_memory(struct chtls_dev *cdev,
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
if (csk_mem_free(cdev, sk)) {
- current_timeo = (prandom_u32() % (HZ / 5)) + 2;
- vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ current_timeo = prandom_u32_max(HZ / 5) + 2;
+ vm_wait = prandom_u32_max(HZ / 5) + 2;
}
add_wait_queue(sk_sleep(sk), &wait);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 00fafc0f8512..430eccea8e5e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
- if (ret)
+ if (ret) {
+ put_device(&hdev->cls_dev);
return ret;
+ }
__module_get(THIS_MODULE);
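
The hnae fix above follows the documented device_register() contract: even on failure the device holds a reference, so the error path must drop it with put_device() (which invokes the release callback) rather than freeing the containing object directly. A minimal sketch with a hypothetical wrapper struct:

#include <linux/device.h>

struct foo_dev {
	struct device dev;	/* hypothetical embedded device */
};

static int foo_register(struct foo_dev *f)
{
	int ret = device_register(&f->dev);

	if (ret) {
		/* drops the reference; f->dev.release frees f */
		put_device(&f->dev);
		return ret;
	}
	return 0;
}
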
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7e75706f76db..87f36d1ce800 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
- err = i40e_alloc_rx_bi(&rx_rings[i]);
- if (err)
- goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2c07fa8ecfc8..b5dcd15ced36 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- kfree(ring->rx_bi);
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ret = i40e_alloc_rx_bi_zc(ring);
- if (ret)
- return ret;
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
@@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->queue_index);
} else {
- ret = i40e_alloc_rx_bi(ring);
- if (ret)
- return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -13296,6 +13289,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
i40e_reset_and_rebuild(pf, true, true);
}
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, true))
+ return -ENOMEM;
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, false))
+ return -ENOMEM;
+ }
+
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
@@ -13528,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
i40e_queue_pair_disable_irq(vsi, queue_pair);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 69e67eb6aea7..b97c95f89fa0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1457,14 +1457,6 @@ err:
return -ENOMEM;
}
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
- rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+ rx_ring->rx_bi =
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 41f86e9535a0..768290dc6f48 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 6d4009e0cbd6..cd7b52fb6b46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,14 +10,6 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
- rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi_zc, 0,
@@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: whether an XSK buffer pool is present
+ *
+ * Try to allocate the new SW ring and return -ENOMEM if the allocation
+ * fails. On success, substitute the old buffer with the allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+ sizeof(*rx_ring->rx_bi);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ rx_ring->rx_bi_zc = sw_ring;
+ } else {
+ kfree(rx_ring->rx_bi_zc);
+ rx_ring->rx_bi_zc = NULL;
+ rx_ring->rx_bi = sw_ring;
+ }
+ return 0;
+}
+
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: whether zero-copy (XSK) buffers are to be used
+ *
+ * Reallocate the SW buffer ring for each rx_ring that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+ struct i40e_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+ rx_ring = vsi->rx_rings[q];
+ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
* i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
* certain ring/qid
* @vsi: Current VSI
@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
if (err)
return err;
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+ if (err)
+ return err;
+
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+ if (err)
+ return err;
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index bb962987f300..821df248f8be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
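
The i40e rework above replaces separate alloc/free paths with an allocate-then-swap helper: the replacement SW ring is allocated first, and the old one is freed only after success, so an -ENOMEM leaves the original ring intact. A sketch of the pattern with hypothetical types:

#include <linux/slab.h>

/* swap *ring for a freshly zeroed one; on failure the old ring survives */
static int realloc_sw_ring(void **ring, size_t count, size_t elem_size)
{
	void *sw_ring = kcalloc(count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	kfree(*ring);
	*ring = sw_ring;
	return 0;
}
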
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index 9809f551fc2e..9ec5f38d38a8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -815,6 +815,7 @@ free_flowid:
cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
txsc->hw_flow_id, false);
fail:
+ kfree(txsc);
return ERR_PTR(ret);
}
@@ -870,6 +871,7 @@ free_flowid:
cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
rxsc->hw_flow_id, false);
fail:
+ kfree(rxsc);
return ERR_PTR(ret);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4fba7cb0144b..7cd381530aa4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4060,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev)
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- return -ENXIO;
+ err = -ENXIO;
+ goto err_wed_exit;
}
}
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
if (IS_ERR(eth->clks[i])) {
- if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_wed_exit;
+ }
if (eth->soc->required_clks & BIT(i)) {
dev_err(&pdev->dev, "clock %s not found\n",
mtk_clks_source_name[i]);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_wed_exit;
}
eth->clks[i] = NULL;
}
@@ -4083,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev)
err = mtk_hw_init(eth);
if (err)
- return err;
+ goto err_wed_exit;
eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
@@ -4179,6 +4183,8 @@ err_free_dev:
mtk_free_dev(eth);
err_deinit_hw:
mtk_hw_deinit(eth);
+err_wed_exit:
+ mtk_wed_exit();
return err;
}
@@ -4198,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev)
phylink_disconnect_phy(mac->phylink);
}
+ mtk_wed_exit();
mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index ae00e572390d..2d8ca99f2467 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -397,12 +397,6 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
return 0;
}
-static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
-{
- return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
- FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
-}
-
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
struct mtk_foe_entry *data)
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 099b6e0df619..65e01bf4b4d2 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -1072,16 +1072,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
pdev = of_find_device_by_node(np);
if (!pdev)
- return;
+ goto err_of_node_put;
get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return;
+ goto err_put_device;
regs = syscon_regmap_lookup_by_phandle(np, NULL);
if (IS_ERR(regs))
- return;
+ goto err_put_device;
rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
@@ -1124,8 +1124,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw_list[index] = hw;
+ mutex_unlock(&hw_lock);
+
+ return;
+
unlock:
mutex_unlock(&hw_lock);
+err_put_device:
+ put_device(&pdev->dev);
+err_of_node_put:
+ of_node_put(np);
}
void mtk_wed_exit(void)
@@ -1146,6 +1154,7 @@ void mtk_wed_exit(void)
hw_list[i] = NULL;
debugfs_remove(hw->debugfs_dir);
put_device(hw->dev);
+ of_node_put(hw->node);
kfree(hw);
}
}
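
The mtk_wed hunks above add goto labels that release references in the reverse order they were taken: put_device() for the pdev obtained via of_find_device_by_node(), then of_node_put() for the node reference the caller handed in. A sketch of the shape, assuming the caller owns a reference on np:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void attach_hw(struct device_node *np)
{
	struct platform_device *pdev;

	pdev = of_find_device_by_node(np);	/* takes a pdev reference */
	if (!pdev)
		goto err_of_node_put;

	if (platform_get_irq(pdev, 0) < 0)
		goto err_put_device;

	return;	/* success: keep both references for the hw lifetime */

err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}
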
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 5d58fd99be3c..19d4848df17d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -2817,11 +2817,15 @@ err_out:
* than the full array, but leave the qcq shells in place
*/
for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
- lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->txqcqs[i]);
+ if (lif->txqcqs && lif->txqcqs[i]) {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+ }
- lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
- ionic_qcq_free(lif, lif->rxqcqs[i]);
+ if (lif->rxqcqs && lif->rxqcqs[i]) {
+ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ }
}
if (err)
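
The ionic fix above hardens an unwind path that can run while the qcq arrays are only partially populated, so both the array pointer and each slot are checked before freeing. A sketch with a hypothetical element type:

#include <linux/slab.h>

struct thing { int dummy; };	/* hypothetical element */

static void free_partial(struct thing **arr, int from, int to)
{
	int i;

	if (!arr)	/* array itself may not have been allocated */
		return;

	for (i = from; i < to; i++) {
		if (!arr[i])	/* slot may never have been filled */
			continue;
		kfree(arr[i]);
		arr[i] = NULL;
	}
}
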
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 023682cd2768..9e59669a93dd 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -129,7 +129,7 @@ static int rocker_reg_test(const struct rocker *rocker)
u64 test_reg;
u64 rnd;
- rnd = prandom_u32();
+ rnd = get_random_u32();
rnd >>= 1;
rocker_write32(rocker, TEST_REG, rnd);
test_reg = rocker_read32(rocker, TEST_REG);
@@ -139,9 +139,9 @@ static int rocker_reg_test(const struct rocker *rocker)
return -EIO;
}
- rnd = prandom_u32();
+ rnd = get_random_u32();
rnd <<= 31;
- rnd |= prandom_u32();
+ rnd |= get_random_u32();
rocker_write64(rocker, TEST_REG64, rnd);
test_reg = rocker_read64(rocker, TEST_REG64);
if (test_reg != rnd * 2) {
@@ -224,7 +224,7 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
if (err)
goto unmap;
- prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
+ get_random_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
expect[i] = ~buf[i];
err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index d1e1aa19a68e..7022fb2005a2 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3277,6 +3277,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
bool was_enabled = efx->port_enabled;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ /* If this function is a VF and we have access to the parent PF,
+ * then use the PF control path to attempt to change the VF MAC address.
+ */
+ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 mac[ETH_ALEN];
+
+ /* net_dev->dev_addr can be zeroed by efx_net_stop in
+ * efx_ef10_sriov_set_vf_mac, so pass in a copy.
+ */
+ ether_addr_copy(mac, efx->net_dev->dev_addr);
+
+ rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
+ if (!rc)
+ return 0;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Updating VF mac via PF failed (%d), setting directly\n",
+ rc);
+ }
+#endif
+
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
@@ -3297,40 +3321,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_open(efx->net_dev);
efx_device_attach_if_not_resetting(efx);
-#ifdef CONFIG_SFC_SRIOV
- if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
-
- if (rc == -EPERM) {
- struct efx_nic *efx_pf;
-
- /* Switch to PF and change MAC address on vport */
- efx_pf = pci_get_drvdata(pci_dev_pf);
-
- rc = efx_ef10_sriov_set_vf_mac(efx_pf,
- nic_data->vf_index,
- efx->net_dev->dev_addr);
- } else if (!rc) {
- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
- struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
- unsigned int i;
-
- /* MAC address successfully changed by VF (with MAC
- * spoofing) so update the parent PF if possible.
- */
- for (i = 0; i < efx_pf->vf_count; ++i) {
- struct ef10_vf *vf = nic_data->vf + i;
-
- if (vf->efx == efx) {
- ether_addr_copy(vf->mac,
- efx->net_dev->dev_addr);
- return 0;
- }
- }
- }
- } else
-#endif
if (rc == -EPERM) {
netif_err(efx, drv, efx->net_dev,
"Cannot change MAC address; use sfboot to enable"
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index be72e71da027..5f201a547e5b 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -162,9 +162,9 @@ struct efx_filter_spec {
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
- u32 vport_id;
u32 rss_context;
- __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ u32 vport_id;
+ __be16 outer_vid;
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 4826e6a7e4ce..9220afeddee8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -660,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
return false;
- return memcmp(&left->outer_vid, &right->outer_vid,
+ return memcmp(&left->vport_id, &right->vport_id,
sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) == 0;
+ offsetof(struct efx_filter_spec, vport_id)) == 0;
}
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
- return jhash2((const u32 *)&spec->outer_vid,
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
+ return jhash2((const u32 *)&spec->vport_id,
(sizeof(struct efx_filter_spec) -
- offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ offsetof(struct efx_filter_spec, vport_id)) / 4,
0);
}
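
The sfc change above works because efx_filter_spec_hash() and efx_filter_spec_equal() treat everything from a chosen member to the end of the struct as the match window; moving vport_id to the head of that window (4-byte aligned, as jhash2() requires) makes it part of the key. The general idiom, with a hypothetical struct:

#include <linux/jhash.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>

struct spec {
	u32 flags;	/* hypothetical; excluded from the match */
	u32 key_start;	/* match window begins here */
	u32 keys[4];
};

static u32 spec_hash(const struct spec *s)
{
	/* jhash2() walks u32 words, so the window must be 4-byte aligned */
	BUILD_BUG_ON(offsetof(struct spec, key_start) & 3);
	return jhash2((const u32 *)&s->key_start,
		      (sizeof(*s) - offsetof(struct spec, key_start)) / 4, 0);
}
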
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 65c96773c6d2..8273e6a175c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1214,6 +1214,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (priv->plat->tx_queues_to_use > 1)
priv->phylink_config.mac_capabilities &=
~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ priv->phylink_config.mac_managed_pm = true;
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 91f10f746dff..1c16548415cd 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1328,7 +1328,7 @@ static int happy_meal_init(struct happy_meal *hp)
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
- const char *bursts;
+ const char *bursts = "64";
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 3e69079ed694..791b4a53d69f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -438,7 +438,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
if ((--bc->hdlctx.slotcnt) > 0)
return 0;
bc->hdlctx.slotcnt = bc->ch_params.slottime;
- if ((prandom_u32() % 256) > bc->ch_params.ppersist)
+ if (get_random_u8() > bc->ch_params.ppersist)
return 0;
}
}
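
The baycom hunk above (repeated in hdlcdrv and yam just below) replaces `prandom_u32() % 256` with get_random_u8(): drawing one byte from the batched entropy pool is equivalent to the old modulo for the p-persistence test. A sketch:

#include <linux/random.h>

/* CSMA p-persistence: transmit only if a uniform random byte is <= ppersist */
static bool may_transmit(u8 ppersist)
{
	return get_random_u8() <= ppersist;
}
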
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index a6184d6c7b15..2263029d1a20 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -377,7 +377,7 @@ void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s)
if ((--s->hdlctx.slotcnt) > 0)
return;
s->hdlctx.slotcnt = s->ch_params.slottime;
- if ((prandom_u32() % 256) > s->ch_params.ppersist)
+ if (get_random_u8() > s->ch_params.ppersist)
return;
start_tx(dev, s);
}
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 980f2be32f05..2ed2f836f09a 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -626,7 +626,7 @@ static void yam_arbitrate(struct net_device *dev)
yp->slotcnt = yp->slot / 10;
/* is random > persist ? */
- if ((prandom_u32() % 256) > yp->pers)
+ if (get_random_u8() > yp->pers)
return;
yam_start_tx(dev, yp);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 11f767a20444..eea777ec2541 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>
+#include <linux/string.h>
#include "hyperv_net.h"
#include "netvsc_trace.h"
@@ -335,9 +336,10 @@ static void rndis_filter_receive_response(struct net_device *ndev,
if (resp->msg_len <=
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
- memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
+ unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
data + RNDIS_HEADER_SIZE + sizeof(*req_id),
- resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id));
+ resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
+ "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
if (request->request_msg.ndis_msg_type ==
RNDIS_MSG_QUERY && request->request_msg.msg.
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
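
The rndis change above switches to unsafe_memcpy(), which tells FORTIFY_SOURCE that the copy deliberately runs past the declared bounds of the member into trailing space the compiler cannot see, and records the justification at the call site. A sketch with a hypothetical layout:

#include <linux/string.h>

struct msg_wrap {
	u32 hdr;
	u8 body[16];
	/* followed by EXTRA_LEN reserved bytes in the real allocation */
};

static void copy_resp(struct msg_wrap *w, const void *data, size_t len)
{
	/* len may exceed sizeof(w->body); the trailing space absorbs it */
	unsafe_memcpy(w->body, data, len,
		      "body is followed by EXTRA_LEN reserved bytes");
}
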
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8f8f73099de8..c5cfe8555199 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -361,7 +361,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
- schedule_work(&port->bc_work);
+ queue_work(system_unbound_wq, &port->bc_work);
if (err)
goto free_nskb;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 9e9adde335c8..349b7b1dbbf2 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1758,7 +1758,7 @@ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
{
- u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE);
+ u16 seed_value = prandom_u32_max(QCA808X_MASTER_SLAVE_SEED_RANGE);
return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
QCA808X_MASTER_SLAVE_SEED_CFG,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 8549e0e356c9..b60db8b6f477 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -254,8 +254,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_EEE_ERROR_CHANGE_INT_EN);
if (!dp83822->fx_enabled)
- misr_status |= DP83822_MDI_XOVER_INT_EN |
- DP83822_ANEG_ERR_INT_EN |
+ misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 6939563d3b7c..417527f8bbf5 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -853,6 +853,14 @@ static int dp83867_config_init(struct phy_device *phydev)
else
val &= ~DP83867_SGMII_TYPE;
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
+
+ /* This is a SW workaround for link instability if RX_CTRL is
+ * not strapped to mode 3 or 4 in HW. This is required for SGMII
+ * in addition to clearing bit 7, handled above.
+ */
+ if (dp83867->rxctrl_strap_quirk)
+ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
+ BIT(8));
}
val = phy_read(phydev, DP83867_CFG3);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ef10f5a70e5a..62106c9e9a9d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1678,6 +1678,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
if (phy_interrupt_is_valid(phy))
phy_request_interrupt(phy);
+ if (pl->config->mac_managed_pm)
+ phy->mac_managed_pm = true;
+
return 0;
}
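
With the phylink hunk above, a MAC driver that manages PHY power itself only needs to set mac_managed_pm in its phylink_config before phylink_create(); phylink then marks the PHY at bringup, as the stmmac hunk earlier does. A hypothetical wrapper:

#include <linux/phylink.h>

static struct phylink *create_managed_pm_phylink(struct phylink_config *cfg,
						 struct fwnode_handle *fwnode,
						 phy_interface_t mode,
						 const struct phylink_mac_ops *ops)
{
	cfg->mac_managed_pm = true;	/* MAC driver owns PHY suspend/resume */
	return phylink_create(cfg, fwnode, mode, ops);
}
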
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 41db10f9be49..19eac00b2381 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -284,7 +284,7 @@ static __init bool randomized_test(void)
mutex_lock(&mutex);
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
- prandom_bytes(ip, 4);
+ get_random_bytes(ip, 4);
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
@@ -299,7 +299,7 @@ static __init bool randomized_test(void)
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 4);
- prandom_bytes(mutate_mask, 4);
+ get_random_bytes(mutate_mask, 4);
mutate_amount = prandom_u32_max(32);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
@@ -310,7 +310,7 @@ static __init bool randomized_test(void)
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
- prandom_u32_max(256));
+ get_random_u8());
cidr = prandom_u32_max(32) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t,
@@ -328,7 +328,7 @@ static __init bool randomized_test(void)
}
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
- prandom_bytes(ip, 16);
+ get_random_bytes(ip, 16);
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
@@ -343,7 +343,7 @@ static __init bool randomized_test(void)
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 16);
- prandom_bytes(mutate_mask, 16);
+ get_random_bytes(mutate_mask, 16);
mutate_amount = prandom_u32_max(128);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
@@ -354,7 +354,7 @@ static __init bool randomized_test(void)
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
- prandom_u32_max(256));
+ get_random_u8());
cidr = prandom_u32_max(128) + 1;
peer = peers[prandom_u32_max(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t,
@@ -381,13 +381,13 @@ static __init bool randomized_test(void)
for (j = 0;; ++j) {
for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 4);
+ get_random_bytes(ip, 4);
if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
pr_err("allowedips random v4 self-test: FAIL\n");
goto free;
}
- prandom_bytes(ip, 16);
+ get_random_bytes(ip, 16);
if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
pr_err("allowedips random v6 self-test: FAIL\n");
goto free;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 479041f070f9..10d9d9c63b28 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1128,7 +1128,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work)
if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
/* 100ms ~ 300ms */
err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
- 100 * (1 + prandom_u32() % 3));
+ 100 * (1 + prandom_u32_max(3)));
else
err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index d0a7465be586..170c61c8136c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -177,7 +177,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++) {
pfn_mac.mac[i] &= mac_mask[i];
- pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
+ pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]);
}
/* Clear multi bit */
pfn_mac.mac[0] &= 0xFE;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index ed586e6d7d64..de0c545d50fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1099,7 +1099,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
iwl_mvm_mac_ap_iterator, &data);
if (data.beacon_device_ts) {
- u32 rand = (prandom_u32() % (64 - 36)) + 36;
+ u32 rand = prandom_u32_max(64 - 36) + 36;
mvmvif->ap_beacon_time = data.beacon_device_ts +
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 535995e8279f..bcd564dc3554 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -239,7 +239,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_info->pkt_len = pkt_len;
mwifiex_form_mgmt_frame(skb, buf, len);
- *cookie = prandom_u32() | 1;
+ *cookie = get_random_u32() | 1;
if (ieee80211_is_action(mgmt->frame_control))
skb = mwifiex_clone_skb_for_tx_status(priv,
@@ -303,7 +303,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
duration);
if (!ret) {
- *cookie = prandom_u32() | 1;
+ *cookie = get_random_u32() | 1;
priv->roc_cfg.cookie = *cookie;
priv->roc_cfg.chan = *chan;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index b89047965e78..9bbfff803357 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1161,7 +1161,7 @@ static int mgmt_tx(struct wiphy *wiphy,
const u8 *vendor_ie;
int ret = 0;
- *cookie = prandom_u32();
+ *cookie = get_random_u32();
priv->tx_cookie = *cookie;
mgmt = (const struct ieee80211_mgmt *)buf;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index bfdf03bfa6c5..73e6f9408b51 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -449,7 +449,7 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
const struct ieee80211_mgmt *mgmt_frame = (void *)params->buf;
- u32 short_cookie = prandom_u32();
+ u32 short_cookie = get_random_u32();
u16 flags = 0;
u16 freq;
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 5a3e7a626702..4a9e4b5d3547 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -1594,7 +1594,7 @@ static int cw1200_get_prio_queue(struct cw1200_common *priv,
edca = &priv->edca.params[i];
score = ((edca->aifns + edca->cwmin) << 16) +
((edca->cwmax - edca->cwmin) *
- (get_random_int() & 0xFFFF));
+ get_random_u16());
if (score < best && (winner < 0 || i != 3)) {
best = score;
winner = i;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 3e3922d4c788..28c0f06e311f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6100,7 +6100,7 @@ static int wl1271_register_hw(struct wl1271 *wl)
wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
- nic_addr = get_random_int();
+ nic_addr = get_random_u32();
} else {
oui_addr = wl->fuse_oui_addr;
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index ff09a8cedf93..2397a903d8f5 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -311,7 +311,7 @@ err_unreg_dev:
return ERR_PTR(err);
err_free_dev:
- kfree(dev);
+ put_device(&dev->dev);
return ERR_PTR(err);
}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index bbe5099c836d..c60ec0b373c5 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -170,15 +170,12 @@ EXPORT_SYMBOL(nvdimm_namespace_disk_name);
const uuid_t *nd_dev_to_uuid(struct device *dev)
{
- if (!dev)
- return &uuid_null;
-
- if (is_namespace_pmem(dev)) {
+ if (dev && is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nspm->uuid;
- } else
- return &uuid_null;
+ }
+ return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);
@@ -388,7 +385,7 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
*
* BLK-space is valid as long as it does not precede a PMEM
* allocation in a given region. PMEM-space must be contiguous
- * and adjacent to an existing existing allocation (if one
+ * and adjacent to an existing allocation (if one
* exists). If reserving PMEM any space is valid.
*/
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
@@ -839,7 +836,6 @@ static ssize_t size_store(struct device *dev,
{
struct nd_region *nd_region = to_nd_region(dev->parent);
unsigned long long val;
- uuid_t **uuid = NULL;
int rc;
rc = kstrtoull(buf, 0, &val);
@@ -853,16 +849,12 @@ static ssize_t size_store(struct device *dev,
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
- if (is_namespace_pmem(dev)) {
+ /* setting size zero == 'delete namespace' */
+ if (rc == 0 && val == 0 && is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
- uuid = &nspm->uuid;
- }
-
- if (rc == 0 && val == 0 && uuid) {
- /* setting size zero == 'delete namespace' */
- kfree(*uuid);
- *uuid = NULL;
+ kfree(nspm->uuid);
+ nspm->uuid = NULL;
}
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 473a71bbd9c9..e0875d369762 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -509,16 +509,13 @@ static ssize_t align_store(struct device *dev,
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long val, dpa;
- u32 remainder;
+ u32 mappings, remainder;
int rc;
rc = kstrtoul(buf, 0, &val);
if (rc)
return rc;
- if (!nd_region->ndr_mappings)
- return -ENXIO;
-
/*
* Ensure space-align is evenly divisible by the region
* interleave-width because the kernel typically has no facility
@@ -526,7 +523,8 @@ static ssize_t align_store(struct device *dev,
* contribute to the tail capacity in system-physical-address
* space for the namespace.
*/
- dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
+ mappings = max_t(u32, 1, nd_region->ndr_mappings);
+ dpa = div_u64_rem(val, mappings, &remainder);
if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
|| val > region_size(nd_region) || remainder)
return -EINVAL;
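
The align_store() rework above clamps the mapping count instead of bailing out, so regions without mappings still accept an alignment. The validation itself is div_u64_rem() plus power-of-two and range checks; a sketch with hypothetical parameters:

#include <linux/math64.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/mm.h>

static int check_align(unsigned long val, u32 mappings, u64 region_size)
{
	u32 remainder;
	u64 dpa;

	mappings = max_t(u32, 1, mappings);	/* avoid divide-by-zero */
	dpa = div_u64_rem(val, mappings, &remainder);
	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE ||
	    val > region_size || remainder)
		return -EINVAL;
	return 0;
}
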
@@ -1096,7 +1094,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
return rc;
}
/**
- * nvdimm_flush - flush any posted write queues between the cpu and pmem media
+ * generic_nvdimm_flush() - flush any posted write queues between the cpu and pmem media
* @nd_region: interleaved pmem region
*/
int generic_nvdimm_flush(struct nd_region *nd_region)
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index b5aa55c61461..8aefb60c42ff 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -408,7 +408,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return rc;
}
-void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
+static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
int rc;
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 04bd28f17dcc..d90e4f0c08b7 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -23,7 +23,7 @@ u32 nvme_auth_get_seqnum(void)
mutex_lock(&nvme_dhchap_mutex);
if (!nvme_dhchap_seqnum)
- nvme_dhchap_seqnum = prandom_u32();
+ nvme_dhchap_seqnum = get_random_u32();
else {
nvme_dhchap_seqnum++;
if (!nvme_dhchap_seqnum)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 00f2f81e20fa..0ea7e441e080 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -182,6 +182,7 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
for_each_node(node)
rcu_assign_pointer(head->current_path[node], NULL);
+ kblockd_schedule_work(&head->requeue_work);
}
static bool nvme_path_is_disabled(struct nvme_ns *ns)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5b796efa325b..bcbef6bc5672 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3521,12 +3521,16 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5ad0ab2853a4..6e079abb22ee 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -996,7 +996,7 @@ static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- cancel_work_sync(&ctrl->err_work);
+ flush_work(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->reconnect_work);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 93e2e313fa70..1eed0fc26b3a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2181,7 +2181,7 @@ out_fail:
static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
{
- cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+ flush_work(&to_tcp_ctrl(ctrl)->err_work);
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
}
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index f54a6f450391..f0cb31198a8f 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -393,7 +393,7 @@ static int parse_slot_config(int slot,
}
if (p0 + function_len < pos) {
- printk(KERN_ERR "eisa_enumerator: function %d length mis-match "
+ printk(KERN_ERR "eisa_enumerator: function %d length mismatch "
"got %d, expected %d\n",
num_func, pos-p0, function_len);
res=-1;
@@ -407,13 +407,13 @@ static int parse_slot_config(int slot,
}
if (pos != es->config_data_length) {
- printk(KERN_ERR "eisa_enumerator: config data length mis-match got %d, expected %d\n",
+ printk(KERN_ERR "eisa_enumerator: config data length mismatch got %d, expected %d\n",
pos, es->config_data_length);
res=-1;
}
if (num_func != es->num_functions) {
- printk(KERN_ERR "eisa_enumerator: number of functions mis-match got %d, expected %d\n",
+ printk(KERN_ERR "eisa_enumerator: number of functions mismatch got %d, expected %d\n",
num_func, es->num_functions);
res=-2;
}
@@ -451,7 +451,7 @@ static int init_slot(int slot, struct eeprom_eisa_slot_info *es)
}
if (es->eisa_slot_id != id) {
print_eisa_id(id_string, id);
- printk(KERN_ERR "EISA slot %d id mis-match: got %s",
+ printk(KERN_ERR "EISA slot %d id mismatch: got %s",
slot, id_string);
print_eisa_id(id_string, es->eisa_slot_id);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index dc6a30ee6edf..b4096598dbcb 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1768,10 +1768,7 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
}
res->end = res->start + new_size - 1;
-
- /* If the resource is part of the add_list remove it now */
- if (add_list)
- remove_from_list(add_list, res);
+ remove_from_list(add_list, res);
}
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
@@ -1926,8 +1923,6 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
if (!bridge->is_hotplug_bridge)
return;
- pci_dbg(bridge, "distributing available resources\n");
-
/* Take the initial extra resources from the hotplug port */
available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
@@ -1939,59 +1934,6 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
available_mmio_pref);
}
-static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
-{
- const struct resource *r;
-
- /*
- * Check the child device's resources and if they are not yet
- * assigned it means we are configuring them (not the boot
- * firmware) so we should be able to extend the upstream
- * bridge's (that's the hotplug downstream PCIe port) resources
- * in the same way we do with the normal hotplug case.
- */
- r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
- if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
- return false;
- r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
- if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
- return false;
- r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
- if (!r->flags || !(r->flags & IORESOURCE_STARTALIGN))
- return false;
-
- return true;
-}
-
-static void pci_root_bus_distribute_available_resources(struct pci_bus *bus,
- struct list_head *add_list)
-{
- struct pci_dev *dev, *bridge = bus->self;
-
- for_each_pci_bridge(dev, bus) {
- struct pci_bus *b;
-
- b = dev->subordinate;
- if (!b)
- continue;
-
- /*
- * Need to check "bridge" here too because it is NULL
- * in case of root bus.
- */
- if (bridge && pci_bridge_resources_not_assigned(dev)) {
- pci_bridge_distribute_available_resources(bridge, add_list);
- /*
- * There is only PCIe upstream port on the bus
- * so we don't need to go futher.
- */
- return;
- }
-
- pci_root_bus_distribute_available_resources(b, add_list);
- }
-}
-
/*
* First try will not touch PCI bridge res.
* Second and later try will clear small leaf bridge res.
@@ -2031,8 +1973,6 @@ again:
*/
__pci_bus_size_bridges(bus, add_list);
- pci_root_bus_distribute_available_resources(bus, add_list);
-
/* Depth last, allocate resources and update the hardware. */
__pci_bus_assign_resources(bus, add_list, &fail_head);
if (add_list)
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 44c07ea487f4..341010f20b77 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -185,7 +185,7 @@ config APPLE_M1_CPU_PMU
config ALIBABA_UNCORE_DRW_PMU
tristate "Alibaba T-Head Yitian 710 DDR Sub-system Driveway PMU driver"
- depends on ARM64 || COMPILE_TEST
+ depends on (ARM64 && ACPI) || COMPILE_TEST
help
Support for Driveway PMU events monitoring on Yitian 710 DDR
Sub-system.
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 82729b874f09..a7689fecb49d 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -658,8 +658,8 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!drw_pmu->cfg_base)
- return -ENOMEM;
+ if (IS_ERR(drw_pmu->cfg_base))
+ return PTR_ERR(drw_pmu->cfg_base);
name = devm_kasprintf(drw_pmu->dev, GFP_KERNEL, "ali_drw_%llx",
(u64) (res->start >> ALI_DRW_PMU_PA_SHIFT));
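
The Alibaba PMU fix above corrects a common slip: devm_ioremap_resource() never returns NULL, it returns an ERR_PTR() that must be unpacked with IS_ERR()/PTR_ERR(). A sketch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int map_cfg(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))		/* failure is ERR_PTR, never NULL */
		return PTR_ERR(*base);
	return 0;
}
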
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 15e5a47be7d5..3852c18362f5 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -652,8 +652,11 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
- /* Enable the access for TIME csr only from the user mode now */
- csr_write(CSR_SCOUNTEREN, 0x2);
+ /*
+ * Enable userspace access to the CYCLE, TIME, and INSTRET CSRs, as
+ * required to maintain uABI compatibility.
+ */
+ csr_write(CSR_SCOUNTEREN, 0x7);
/* Stop all the counters so that they can be enabled from perf */
pmu_sbi_stop_all(pmu);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b8de25118ad0..bb63edb507da 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -423,6 +423,7 @@ config RTC_DRV_ISL1208
config RTC_DRV_ISL12022
tristate "Intersil ISL12022"
+ select REGMAP_I2C
help
If you say yes here you get support for the
Intersil ISL12022 RTC chip.
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index bdb1df843c78..610413b4e9ca 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1352,10 +1352,10 @@ static void cmos_check_acpi_rtc_status(struct device *dev,
static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
- cmos_wake_setup(&pnp->dev);
+ int irq, ret;
if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) {
- unsigned int irq = 0;
+ irq = 0;
#ifdef CONFIG_X86
/* Some machines contain a PNP entry for the RTC, but
* don't define the IRQ. It should always be safe to
@@ -1364,13 +1364,17 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
if (nr_legacy_irqs())
irq = RTC_IRQ;
#endif
- return cmos_do_probe(&pnp->dev,
- pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
} else {
- return cmos_do_probe(&pnp->dev,
- pnp_get_resource(pnp, IORESOURCE_IO, 0),
- pnp_irq(pnp, 0));
+ irq = pnp_irq(pnp, 0);
}
+
+ ret = cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), irq);
+ if (ret)
+ return ret;
+
+ cmos_wake_setup(&pnp->dev);
+
+ return 0;
}
static void cmos_pnp_remove(struct pnp_dev *pnp)
@@ -1454,10 +1458,9 @@ static inline void cmos_of_init(struct platform_device *pdev) {}
static int __init cmos_platform_probe(struct platform_device *pdev)
{
struct resource *resource;
- int irq;
+ int irq, ret;
cmos_of_init(pdev);
- cmos_wake_setup(&pdev->dev);
if (RTC_IOMAPPED)
resource = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1467,7 +1470,13 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
if (irq < 0)
irq = -1;
- return cmos_do_probe(&pdev->dev, resource, irq);
+ ret = cmos_do_probe(&pdev->dev, resource, irq);
+ if (ret)
+ return ret;
+
+ cmos_wake_setup(&pdev->dev);
+
+ return 0;
}
static int cmos_platform_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index a24331ba8a5f..5db9c737c022 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -132,7 +132,7 @@ ds1685_rtc_bin2bcd(struct ds1685_priv *rtc, u8 val, u8 bin_mask, u8 bcd_mask)
}
/**
- * s1685_rtc_check_mday - check validity of the day of month.
+ * ds1685_rtc_check_mday - check validity of the day of month.
* @rtc: pointer to the ds1685 rtc structure.
* @mday: day of month.
*
diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c
index c2717bb52b2b..c828bc8e05b9 100644
--- a/drivers/rtc/rtc-gamecube.c
+++ b/drivers/rtc/rtc-gamecube.c
@@ -265,18 +265,17 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d)
* SRAM address as on previous consoles.
*/
ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
- if (ret) {
- pr_err("failed to get the RTC bias\n");
- iounmap(hw_srnprot);
- return -1;
- }
/* Reset SRAM access to how it was before, our job here is done. */
if (old != 0x7bf)
iowrite32be(old, hw_srnprot);
+
iounmap(hw_srnprot);
- return 0;
+ if (ret)
+ pr_err("failed to get the RTC bias\n");
+
+ return ret;
}
static const struct regmap_range rtc_rd_ranges[] = {
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 79461ded1a48..ca677c4265e6 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
/* ISL register offsets */
#define ISL12022_REG_SC 0x00
@@ -42,83 +43,32 @@ static struct i2c_driver isl12022_driver;
struct isl12022 {
struct rtc_device *rtc;
-
- bool write_enabled; /* true if write enable is set */
+ struct regmap *regmap;
};
-
-static int isl12022_read_regs(struct i2c_client *client, uint8_t reg,
- uint8_t *data, size_t n)
-{
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = data
- }, /* setup read ptr */
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = n,
- .buf = data
- }
- };
-
- int ret;
-
- data[0] = reg;
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "%s: read error, ret=%d\n",
- __func__, ret);
- return -EIO;
- }
-
- return 0;
-}
-
-
-static int isl12022_write_reg(struct i2c_client *client,
- uint8_t reg, uint8_t val)
-{
- uint8_t data[2] = { reg, val };
- int err;
-
- err = i2c_master_send(client, data, sizeof(data));
- if (err != sizeof(data)) {
- dev_err(&client->dev,
- "%s: err=%d addr=%02x, data=%02x\n",
- __func__, err, data[0], data[1]);
- return -EIO;
- }
-
- return 0;
-}
-
-
/*
* In the routines that deal directly with the isl12022 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct isl12022 *isl12022 = dev_get_drvdata(dev);
+ struct regmap *regmap = isl12022->regmap;
uint8_t buf[ISL12022_REG_INT + 1];
int ret;
- ret = isl12022_read_regs(client, ISL12022_REG_SC, buf, sizeof(buf));
+ ret = regmap_bulk_read(regmap, ISL12022_REG_SC, buf, sizeof(buf));
if (ret)
return ret;
if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
- dev_warn(&client->dev,
+ dev_warn(dev,
"voltage dropped below %u%%, "
"date and time is not reliable.\n",
buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
}
- dev_dbg(&client->dev,
+ dev_dbg(dev,
"%s: raw data is sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, mon=%02x, year=%02x, wday=%02x, "
"sr=%02x, int=%02x",
@@ -141,65 +91,25 @@ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_mon = bcd2bin(buf[ISL12022_REG_MO] & 0x1F) - 1;
tm->tm_year = bcd2bin(buf[ISL12022_REG_YR]) + 100;
- dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
- "mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+ dev_dbg(dev, "%s: %ptR\n", __func__, tm);
return 0;
}
static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct isl12022 *isl12022 = i2c_get_clientdata(client);
- size_t i;
+ struct isl12022 *isl12022 = dev_get_drvdata(dev);
+ struct regmap *regmap = isl12022->regmap;
int ret;
uint8_t buf[ISL12022_REG_DW + 1];
- dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
- "mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
-
- if (!isl12022->write_enabled) {
-
- ret = isl12022_read_regs(client, ISL12022_REG_INT, buf, 1);
- if (ret)
- return ret;
-
- /* Check if WRTC (write rtc enable) is set factory default is
- * 0 (not set) */
- if (!(buf[0] & ISL12022_INT_WRTC)) {
- dev_info(&client->dev,
- "init write enable and 24 hour format\n");
-
- /* Set the write enable bit. */
- ret = isl12022_write_reg(client,
- ISL12022_REG_INT,
- buf[0] | ISL12022_INT_WRTC);
- if (ret)
- return ret;
-
- /* Write to any RTC register to start RTC, we use the
- * HR register, setting the MIL bit to use the 24 hour
- * format. */
- ret = isl12022_read_regs(client, ISL12022_REG_HR,
- buf, 1);
- if (ret)
- return ret;
-
- ret = isl12022_write_reg(client,
- ISL12022_REG_HR,
- buf[0] | ISL12022_HR_MIL);
- if (ret)
- return ret;
- }
-
- isl12022->write_enabled = true;
- }
+ dev_dbg(dev, "%s: %ptR\n", __func__, tm);
+
+ /* Ensure the write enable bit is set. */
+ ret = regmap_update_bits(regmap, ISL12022_REG_INT,
+ ISL12022_INT_WRTC, ISL12022_INT_WRTC);
+ if (ret)
+ return ret;
/* hours, minutes and seconds */
buf[ISL12022_REG_SC] = bin2bcd(tm->tm_sec);
@@ -216,15 +126,8 @@ static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[ISL12022_REG_DW] = tm->tm_wday & 0x07;
- /* write register's data */
- for (i = 0; i < ARRAY_SIZE(buf); i++) {
- ret = isl12022_write_reg(client, ISL12022_REG_SC + i,
- buf[ISL12022_REG_SC + i]);
- if (ret)
- return -EIO;
- }
-
- return 0;
+ return regmap_bulk_write(isl12022->regmap, ISL12022_REG_SC,
+ buf, sizeof(buf));
}
static const struct rtc_class_ops isl12022_rtc_ops = {
@@ -232,6 +135,12 @@ static const struct rtc_class_ops isl12022_rtc_ops = {
.set_time = isl12022_rtc_set_time,
};
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
+};
+
static int isl12022_probe(struct i2c_client *client)
{
struct isl12022 *isl12022;
@@ -243,13 +152,23 @@ static int isl12022_probe(struct i2c_client *client)
GFP_KERNEL);
if (!isl12022)
return -ENOMEM;
+ dev_set_drvdata(&client->dev, isl12022);
+
+ isl12022->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(isl12022->regmap)) {
+ dev_err(&client->dev, "regmap allocation failed\n");
+ return PTR_ERR(isl12022->regmap);
+ }
+
+ isl12022->rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(isl12022->rtc))
+ return PTR_ERR(isl12022->rtc);
- i2c_set_clientdata(client, isl12022);
+ isl12022->rtc->ops = &isl12022_rtc_ops;
+ isl12022->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ isl12022->rtc->range_max = RTC_TIMESTAMP_END_2099;
- isl12022->rtc = devm_rtc_device_register(&client->dev,
- isl12022_driver.driver.name,
- &isl12022_rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(isl12022->rtc);
+ return devm_rtc_register_device(isl12022->rtc);
}
#ifdef CONFIG_OF
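
The isl12022 conversion above replaces roughly 70 lines of hand-rolled i2c_transfer() helpers with regmap: a two-line regmap_config describes the 8-bit register map, and regmap_bulk_read()/regmap_bulk_write()/regmap_update_bits() cover every access pattern the driver needs. A sketch of the read-modify-write that replaced the write_enabled bookkeeping:

#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config cfg = {
	.reg_bits = 8,
	.val_bits = 8,
};

/* idempotently set `bit` in `reg` without touching the other bits */
static int rtc_set_bit(struct i2c_client *client, unsigned int reg, u8 bit)
{
	struct regmap *map = devm_regmap_init_i2c(client, &cfg);

	if (IS_ERR(map))
		return PTR_ERR(map);
	return regmap_update_bits(map, reg, bit, bit);
}
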
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 6e51df72fd65..c383719292c7 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -257,11 +257,6 @@ static void jz4740_rtc_power_off(void)
kernel_halt();
}
-static void jz4740_rtc_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
{ .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
@@ -329,23 +324,9 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
- clk = devm_clk_get(dev, "rtc");
- if (IS_ERR(clk)) {
- dev_err(dev, "Failed to get RTC clock\n");
- return PTR_ERR(clk);
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "Failed to enable clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev, jz4740_rtc_clk_disable, clk);
- if (ret) {
- dev_err(dev, "Failed to register devm action\n");
- return ret;
- }
+ clk = devm_clk_get_enabled(dev, "rtc");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Failed to get RTC clock\n");
spin_lock_init(&rtc->lock);
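This jz4740 hunk, like the mpfs, mxc and ti-k3 ones that follow, collapses the usual three-step clock dance (devm_clk_get(), clk_prepare_enable(), devm_add_action_or_reset() with a disable callback) into a single devm_clk_get_enabled() call. A sketch of the resulting probe-time idiom; the function name and the "core" clock id are hypothetical:

    #include <linux/clk.h>
    #include <linux/device.h>

    /* demo_probe_clock() and the "core" clock name are hypothetical. */
    static int demo_probe_clock(struct device *dev)
    {
            struct clk *clk;

            /*
             * Equivalent to devm_clk_get() + clk_prepare_enable() +
             * devm_add_action_or_reset(dev, <disable/unprepare>, clk),
             * but as a single call that cannot leak on error paths.
             */
            clk = devm_clk_get_enabled(dev, "core");
            if (IS_ERR(clk))
                    return dev_err_probe(dev, PTR_ERR(clk),
                                         "Failed to get core clock\n");

            return 0;
    }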
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
index f14d1925e0c9..2a479d44f198 100644
--- a/drivers/rtc/rtc-mpfs.c
+++ b/drivers/rtc/rtc-mpfs.c
@@ -193,23 +193,6 @@ static int mpfs_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static inline struct clk *mpfs_rtc_init_clk(struct device *dev)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get(dev, "rtc");
- if (IS_ERR(clk))
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ERR_PTR(ret);
-
- devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare, clk);
- return clk;
-}
-
static irqreturn_t mpfs_rtc_wakeup_irq_handler(int irq, void *dev)
{
struct mpfs_rtc_dev *rtcdev = dev;
@@ -233,7 +216,7 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
{
struct mpfs_rtc_dev *rtcdev;
struct clk *clk;
- u32 prescaler;
+ unsigned long prescaler;
int wakeup_irq, ret;
rtcdev = devm_kzalloc(&pdev->dev, sizeof(struct mpfs_rtc_dev), GFP_KERNEL);
@@ -251,7 +234,7 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
/* range is capped by alarm max, lower reg is 31:0 & upper is 10:0 */
rtcdev->rtc->range_max = GENMASK_ULL(42, 0);
- clk = mpfs_rtc_init_clk(&pdev->dev);
+ clk = devm_clk_get_enabled(&pdev->dev, "rtc");
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -275,14 +258,13 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
/* prescaler hardware adds 1 to reg value */
prescaler = clk_get_rate(devm_clk_get(&pdev->dev, "rtcref")) - 1;
-
if (prescaler > MAX_PRESCALER_COUNT) {
- dev_dbg(&pdev->dev, "invalid prescaler %d\n", prescaler);
+ dev_dbg(&pdev->dev, "invalid prescaler %lu\n", prescaler);
return -EINVAL;
}
writel(prescaler, rtcdev->base + PRESCALER_REG);
- dev_info(&pdev->dev, "prescaler set to: 0x%X \r\n", prescaler);
+ dev_info(&pdev->dev, "prescaler set to: %lu\n", prescaler);
device_init_wakeup(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 53d4e253e81f..762cf03345f1 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -291,14 +291,6 @@ static const struct rtc_class_ops mxc_rtc_ops = {
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
};
-static void mxc_rtc_action(void *p)
-{
- struct rtc_plat_data *pdata = p;
-
- clk_disable_unprepare(pdata->clk_ref);
- clk_disable_unprepare(pdata->clk_ipg);
-}
-
static int mxc_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
@@ -341,33 +333,18 @@ static int mxc_rtc_probe(struct platform_device *pdev)
rtc->range_max = (1 << 16) * 86400ULL - 1;
}
- pdata->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ pdata->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
if (IS_ERR(pdata->clk_ipg)) {
dev_err(&pdev->dev, "unable to get ipg clock!\n");
return PTR_ERR(pdata->clk_ipg);
}
- ret = clk_prepare_enable(pdata->clk_ipg);
- if (ret)
- return ret;
-
- pdata->clk_ref = devm_clk_get(&pdev->dev, "ref");
+ pdata->clk_ref = devm_clk_get_enabled(&pdev->dev, "ref");
if (IS_ERR(pdata->clk_ref)) {
- clk_disable_unprepare(pdata->clk_ipg);
dev_err(&pdev->dev, "unable to get ref clock!\n");
return PTR_ERR(pdata->clk_ref);
}
- ret = clk_prepare_enable(pdata->clk_ref);
- if (ret) {
- clk_disable_unprepare(pdata->clk_ipg);
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev, mxc_rtc_action, pdata);
- if (ret)
- return ret;
-
rate = clk_get_rate(pdata->clk_ref);
if (rate == 32768)
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index cdc623b3e365..dd170e3efd83 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -521,10 +521,9 @@ static int rv3028_param_get(struct device *dev, struct rtc_param *param)
{
struct rv3028_data *rv3028 = dev_get_drvdata(dev);
int ret;
+ u32 value;
switch(param->param) {
- u32 value;
-
case RTC_PARAM_BACKUP_SWITCH_MODE:
ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &value);
if (ret < 0)
@@ -554,9 +553,9 @@ static int rv3028_param_get(struct device *dev, struct rtc_param *param)
static int rv3028_param_set(struct device *dev, struct rtc_param *param)
{
struct rv3028_data *rv3028 = dev_get_drvdata(dev);
+ u8 mode;
switch(param->param) {
- u8 mode;
case RTC_PARAM_BACKUP_SWITCH_MODE:
switch (param->uvalue) {
case RTC_BSM_DISABLED:
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 40c0f7ed36e0..aae40d20d086 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -107,6 +107,8 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
wdt_pdev->dev.parent = &rtc_pdev->dev;
wdt_pdev->dev.platform_data = &wdt_pdata;
rc = platform_device_add(wdt_pdev);
+ if (rc)
+ platform_device_put(wdt_pdev);
}
if (rc)
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
index 7a0f181d3fef..ba23163cc042 100644
--- a/drivers/rtc/rtc-ti-k3.c
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -45,14 +46,6 @@
#define K3RTC_MIN_OFFSET (-277761)
#define K3RTC_MAX_OFFSET (277778)
-/**
- * struct ti_k3_rtc_soc_data - Private of compatible data for ti-k3-rtc
- * @unlock_irq_erratum: Has erratum for unlock infinite IRQs (erratum i2327)
- */
-struct ti_k3_rtc_soc_data {
- const bool unlock_irq_erratum;
-};
-
static const struct regmap_config ti_k3_rtc_regmap_config = {
.name = "peripheral-registers",
.reg_bits = 32,
@@ -118,7 +111,6 @@ static const struct reg_field ti_rtc_reg_fields[] = {
* @rtc_dev: rtc device
* @regmap: rtc mmio regmap
* @r_fields: rtc register fields
- * @soc: SoC compatible match data
*/
struct ti_k3_rtc {
unsigned int irq;
@@ -127,7 +119,6 @@ struct ti_k3_rtc {
struct rtc_device *rtc_dev;
struct regmap *regmap;
struct regmap_field *r_fields[K3_RTC_MAX_FIELDS];
- const struct ti_k3_rtc_soc_data *soc;
};
static int k3rtc_field_read(struct ti_k3_rtc *priv, enum ti_k3_rtc_fields f)
@@ -190,11 +181,22 @@ static int k3rtc_unlock_rtc(struct ti_k3_rtc *priv)
/* Skip fence since we are going to check the unlock bit as fence */
ret = regmap_field_read_poll_timeout(priv->r_fields[K3RTC_UNLOCK], ret,
- !ret, 2, priv->sync_timeout_us);
+ ret, 2, priv->sync_timeout_us);
return ret;
}
+/*
+ * This is the list of SoCs affected by TI's i2327 erratum, which causes the RTC
+ * state-machine to break if not unlocked fast enough during boot. These
+ * SoCs must have the bootloader unlock this device very early in the
+ * boot-flow before we (Linux) can use this device.
+ */
+static const struct soc_device_attribute has_erratum_i2327[] = {
+ { .family = "AM62X", .revision = "SR1.0" },
+ { /* sentinel */ }
+};
+
static int k3rtc_configure(struct device *dev)
{
int ret;
@@ -208,7 +210,7 @@ static int k3rtc_configure(struct device *dev)
*
* In such occurrence, it is assumed that the RTC module is unusable
*/
- if (priv->soc->unlock_irq_erratum) {
+ if (soc_device_match(has_erratum_i2327)) {
ret = k3rtc_check_unlocked(priv);
/* If there is an error OR if we are locked, return error */
if (ret) {
@@ -513,21 +515,12 @@ static struct nvmem_config ti_k3_rtc_nvmem_config = {
static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
{
- int ret;
struct clk *clk;
- clk = devm_clk_get(dev, "osc32k");
+ clk = devm_clk_get_enabled(dev, "osc32k");
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
- if (ret)
- return ret;
-
priv->rate_32k = clk_get_rate(clk);
/* Make sure we are exact 32k clock. Else, try to compensate delay */
@@ -542,24 +535,19 @@ static int k3rtc_get_32kclk(struct device *dev, struct ti_k3_rtc *priv)
*/
priv->sync_timeout_us = (u32)(DIV_ROUND_UP_ULL(1000000, priv->rate_32k) * 4);
- return ret;
+ return 0;
}
static int k3rtc_get_vbusclk(struct device *dev, struct ti_k3_rtc *priv)
{
- int ret;
struct clk *clk;
/* Note: VBUS isn't a context clock, it is needed for hardware operation */
- clk = devm_clk_get(dev, "vbus");
+ clk = devm_clk_get_enabled(dev, "vbus");
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, (void (*)(void *))clk_disable_unprepare, clk);
+ return 0;
}
static int ti_k3_rtc_probe(struct platform_device *pdev)
@@ -602,8 +590,6 @@ static int ti_k3_rtc_probe(struct platform_device *pdev)
if (IS_ERR(priv->rtc_dev))
return PTR_ERR(priv->rtc_dev);
- priv->soc = of_device_get_match_data(dev);
-
priv->rtc_dev->ops = &ti_k3_rtc_ops;
priv->rtc_dev->range_max = (1ULL << 48) - 1; /* 48Bit seconds */
ti_k3_rtc_nvmem_config.priv = priv;
@@ -635,12 +621,8 @@ static int ti_k3_rtc_probe(struct platform_device *pdev)
return devm_rtc_nvmem_register(priv->rtc_dev, &ti_k3_rtc_nvmem_config);
}
-static const struct ti_k3_rtc_soc_data ti_k3_am62_data = {
- .unlock_irq_erratum = true,
-};
-
static const struct of_device_id ti_k3_rtc_of_match_table[] = {
- {.compatible = "ti,am62-rtc", .data = &ti_k3_am62_data},
+ {.compatible = "ti,am62-rtc" },
{}
};
MODULE_DEVICE_TABLE(of, ti_k3_rtc_of_match_table);
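The ti-k3 change above drops per-compatible match data in favour of a runtime soc_device_match() check against an erratum list. A self-contained sketch of that idiom; the list contents and helper name are hypothetical:

    #include <linux/sys_soc.h>
    #include <linux/types.h>

    /* Hypothetical erratum list; family/revision strings are examples. */
    static const struct soc_device_attribute demo_erratum_list[] = {
            { .family = "AM62X", .revision = "SR1.0" },
            { /* sentinel */ }
    };

    static bool demo_needs_workaround(void)
    {
            /* soc_device_match() returns the matching entry, or NULL. */
            return soc_device_match(demo_erratum_list) != NULL;
    }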
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 68f49e2e964c..131293f7f152 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -15,12 +15,14 @@
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>
+#include <asm/scsw.h>
#include "vmur.h"
@@ -78,6 +80,8 @@ static struct ccw_driver ur_driver = {
static DEFINE_MUTEX(vmur_mutex);
+static void ur_uevent(struct work_struct *ws);
+
/*
* Allocation, freeing, getting and putting of urdev structures
*
@@ -108,6 +112,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
ccw_device_get_id(cdev, &urd->dev_id);
mutex_init(&urd->io_mutex);
init_waitqueue_head(&urd->wait);
+ INIT_WORK(&urd->uevent_work, ur_uevent);
spin_lock_init(&urd->open_lock);
refcount_set(&urd->ref_count, 1);
urd->cdev = cdev;
@@ -275,6 +280,18 @@ out:
return rc;
}
+static void ur_uevent(struct work_struct *ws)
+{
+ struct urdev *urd = container_of(ws, struct urdev, uevent_work);
+ char *envp[] = {
+ "EVENT=unsol_de", /* Unsolicited device-end interrupt */
+ NULL
+ };
+
+ kobject_uevent_env(&urd->cdev->dev.kobj, KOBJ_CHANGE, envp);
+ urdev_put(urd);
+}
+
/*
* ur interrupt handler, called from the ccw_device layer
*/
@@ -288,12 +305,21 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
irb->scsw.cmd.count);
}
+ urd = dev_get_drvdata(&cdev->dev);
if (!intparm) {
TRACE("ur_int_handler: unsolicited interrupt\n");
+
+ if (scsw_dstat(&irb->scsw) & DEV_STAT_DEV_END) {
+ /*
+ * Userspace might be interested in a transition to
+ * device-ready state.
+ */
+ urdev_get(urd);
+ schedule_work(&urd->uevent_work);
+ }
+
return;
}
- urd = dev_get_drvdata(&cdev->dev);
- BUG_ON(!urd);
/* On special conditions irb is an error pointer */
if (IS_ERR(irb))
urd->io_request_rc = PTR_ERR(irb);
@@ -809,7 +835,6 @@ static int ur_probe(struct ccw_device *cdev)
rc = -ENOMEM;
goto fail_urdev_put;
}
- cdev->handler = ur_int_handler;
/* validate virtual unit record device */
urd->class = get_urd_class(urd);
@@ -823,6 +848,7 @@ static int ur_probe(struct ccw_device *cdev)
}
spin_lock_irq(get_ccwdev_lock(cdev));
dev_set_drvdata(&cdev->dev, urd);
+ cdev->handler = ur_int_handler;
spin_unlock_irq(get_ccwdev_lock(cdev));
mutex_unlock(&vmur_mutex);
@@ -928,6 +954,10 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
rc = -EBUSY;
goto fail_urdev_put;
}
+ if (cancel_work_sync(&urd->uevent_work)) {
+ /* Work not run yet - need to release reference here */
+ urdev_put(urd);
+ }
device_destroy(vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
urd->char_device = NULL;
@@ -963,6 +993,7 @@ static void ur_remove(struct ccw_device *cdev)
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
urdev_put(dev_get_drvdata(&cdev->dev));
dev_set_drvdata(&cdev->dev, NULL);
+ cdev->handler = NULL;
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
mutex_unlock(&vmur_mutex);
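The vmur change defers the uevent to a workqueue because kobject_uevent_env() may sleep and the interrupt handler may not. The reference counting around the deferred work follows a common pattern, sketched below with hypothetical demo_* names: take a reference before scheduling, drop it in the work function, and drop it manually if cancel_work_sync() reaped the work before it ran:

    #include <linux/kobject.h>
    #include <linux/kref.h>
    #include <linux/workqueue.h>

    /* struct demo_dev and all demo_* helpers are hypothetical. */
    struct demo_dev {
            struct kref ref;
            struct kobject *kobj;
            struct work_struct uevent_work;
    };

    static void demo_release(struct kref *ref)
    {
            /* Free the containing object here. */
    }

    static void demo_uevent_fn(struct work_struct *ws)
    {
            struct demo_dev *d = container_of(ws, struct demo_dev, uevent_work);
            char *envp[] = { "EVENT=unsol_de", NULL };

            kobject_uevent_env(d->kobj, KOBJ_CHANGE, envp);
            kref_put(&d->ref, demo_release);  /* drop the scheduling ref */
    }

    static void demo_on_interrupt(struct demo_dev *d)
    {
            kref_get(&d->ref);            /* keep the object alive for the work */
            schedule_work(&d->uevent_work);
    }

    static void demo_teardown(struct demo_dev *d)
    {
            /* If the work never ran, its reference must be dropped here. */
            if (cancel_work_sync(&d->uevent_work))
                    kref_put(&d->ref, demo_release);
    }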
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 608b0719ce17..92d17d7cb47b 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -13,6 +13,7 @@
#define _VMUR_H_
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
@@ -76,6 +77,7 @@ struct urdev {
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
+ struct work_struct uevent_work; /* work to send uevent */
};
/*
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 53d91bf9c12a..c07d2e3b4bcf 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -254,7 +254,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
@@ -282,7 +282,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else {
struct cpl_t6_act_open_req *req =
(struct cpl_t6_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 39e16eab47aa..ddc048069af2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2233,7 +2233,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
fip->probe_tries++;
- wait = prandom_u32() % FIP_VN_PROBE_WAIT;
+ wait = prandom_u32_max(FIP_VN_PROBE_WAIT);
} else
wait = FIP_VN_RLIM_INT;
mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
@@ -3125,7 +3125,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_BEACON_INT +
- (prandom_u32() % FIP_VN_BEACON_FUZZ));
+ prandom_u32_max(FIP_VN_BEACON_FUZZ));
}
if (time_before(fip->port_ka_time, next_time))
next_time = fip->port_ka_time;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c7f834ba8edb..d38ebd7281b9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2156,8 +2156,8 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* This function makes an running random selection decision on FCF record to
* use through a sequence of @fcf_cnt eligible FCF records with equal
* probability. To perform integer manunipulation of random numbers with
- * size unit32_t, the lower 16 bits of the 32-bit random number returned
- * from prandom_u32() are taken as the random random number generated.
+ * size unit32_t, a 16-bit random number returned from get_random_u16() is
+ * taken as the random random number generated.
*
* Returns true when outcome is for the newly read FCF record should be
* chosen; otherwise, return false when outcome is for keeping the previously
@@ -2169,7 +2169,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
uint32_t rand_num;
/* Get 16-bit uniform random number */
- rand_num = 0xFFFF & prandom_u32();
+ rand_num = get_random_u16();
/* Decision with probability 1/fcf_cnt */
if ((fcf_cnt * rand_num) < 0xFFFF)
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cecfb2cb4c7b..df2fe7bd26d1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -618,7 +618,7 @@ static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
sizeof(struct qedi_endpoint *)), GFP_KERNEL);
if (!qedi->ep_tbl)
return -ENOMEM;
- port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE);
if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
QEDI_LOCAL_PORT_MIN, port_id)) {
qedi_cm_free_mem(qedi);
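The prandom conversions in this section are mechanical: prandom_u32() % N becomes prandom_u32_max(N), which avoids the modulo bias of the old form, while callers that want raw bits move to get_random_u32() or the new dedicated get_random_u16(). A sketch of the three shapes (the function is hypothetical, the helpers are not):

    #include <linux/prandom.h>
    #include <linux/printk.h>
    #include <linux/random.h>

    static void demo_random_usage(void)
    {
            u32 port, isn;
            u16 r16;

            /* Old: prandom_u32() % N — biased for non-power-of-two ranges. */
            port = prandom_u32_max(1024);      /* uniform in [0, 1024) */

            /* Full 32-bit word where all the bits are wanted. */
            isn = (get_random_u32() & ~7UL) - 1;

            /* Old: 0xFFFF & prandom_u32(); now a dedicated 16-bit helper. */
            r16 = get_random_u16();

            pr_debug("port=%u isn=%u r16=%u\n", port, isn, r16);
    }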
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig
index 58cf8c40d08d..ed4c571f8771 100644
--- a/drivers/soc/sifive/Kconfig
+++ b/drivers/soc/sifive/Kconfig
@@ -2,9 +2,9 @@
if SOC_SIFIVE
-config SIFIVE_L2
- bool "Sifive L2 Cache controller"
+config SIFIVE_CCACHE
+ bool "Sifive Composable Cache controller"
help
- Support for the L2 cache controller on SiFive platforms.
+ Support for the composable cache controller on SiFive platforms.
endif
diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile
index b5caff77938f..1f5dc339bf82 100644
--- a/drivers/soc/sifive/Makefile
+++ b/drivers/soc/sifive/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_SIFIVE_L2) += sifive_l2_cache.o
+obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c
new file mode 100644
index 000000000000..1c171150e878
--- /dev/null
+++ b/drivers/soc/sifive/sifive_ccache.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive composable cache controller Driver
+ *
+ * Copyright (C) 2018-2022 SiFive, Inc.
+ *
+ */
+
+#define pr_fmt(fmt) "CCACHE: " fmt
+
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/device.h>
+#include <linux/bitfield.h>
+#include <asm/cacheinfo.h>
+#include <soc/sifive/sifive_ccache.h>
+
+#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100
+#define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104
+#define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120
+#define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124
+#define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128
+
+#define SIFIVE_CCACHE_DATECCFIX_LOW 0x140
+#define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144
+#define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160
+#define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164
+#define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_CCACHE_CONFIG 0x00
+#define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0)
+#define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8)
+#define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16)
+#define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24)
+
+#define SIFIVE_CCACHE_WAYENABLE 0x08
+#define SIFIVE_CCACHE_ECCINJECTERR 0x40
+
+#define SIFIVE_CCACHE_MAX_ECCINTR 4
+
+static void __iomem *ccache_base;
+static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR];
+static struct riscv_cacheinfo_ops ccache_cache_ops;
+static int level;
+
+enum {
+ DIR_CORR = 0,
+ DATA_CORR,
+ DATA_UNCORR,
+ DIR_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t ccache_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ unsigned int val;
+
+ if (kstrtouint_from_user(data, count, 0, &val))
+ return -EINVAL;
+ if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations ccache_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = ccache_write
+};
+
+static void setup_sifive_debug(void)
+{
+ sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL);
+
+ debugfs_create_file("sifive_debug_inject_error", 0200,
+ sifive_test, NULL, &ccache_fops);
+}
+#endif
+
+static void ccache_config_read(void)
+{
+ u32 cfg;
+
+ cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG);
+ pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n",
+ FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg),
+ FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg),
+ BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)),
+ BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg)));
+
+ cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE);
+ pr_info("Index of the largest way enabled: %u\n", cfg);
+}
+
+static const struct of_device_id sifive_ccache_ids[] = {
+ { .compatible = "sifive,fu540-c000-ccache" },
+ { .compatible = "sifive,fu740-c000-ccache" },
+ { .compatible = "sifive,ccache0" },
+ { /* end of table */ }
+};
+
+static ATOMIC_NOTIFIER_HEAD(ccache_err_chain);
+
+int register_sifive_ccache_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ccache_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier);
+
+int unregister_sifive_ccache_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ccache_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier);
+
+static int ccache_largest_wayenabled(void)
+{
+ return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF;
+}
+
+static ssize_t number_of_ways_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", ccache_largest_wayenabled());
+}
+
+static DEVICE_ATTR_RO(number_of_ways_enabled);
+
+static struct attribute *priv_attrs[] = {
+ &dev_attr_number_of_ways_enabled.attr,
+ NULL,
+};
+
+static const struct attribute_group priv_attr_group = {
+ .attrs = priv_attrs,
+};
+
+static const struct attribute_group *ccache_get_priv_group(struct cacheinfo
+ *this_leaf)
+{
+ /* We only want to use the private group for the composable cache */
+ if (this_leaf->level == level)
+ return &priv_attr_group;
+ else
+ return NULL;
+}
+
+static irqreturn_t ccache_int_handler(int irq, void *device)
+{
+ unsigned int add_h, add_l;
+
+ if (irq == g_irq[DIR_CORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW);
+ pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DirError interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_CE,
+ "DirECCFix");
+ }
+ if (irq == g_irq[DIR_UNCORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW);
+ /* Reading this register clears the DirFail interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_UE,
+ "DirECCFail");
+ panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
+ }
+ if (irq == g_irq[DATA_CORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW);
+ pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataError interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_CE,
+ "DatECCFix");
+ }
+ if (irq == g_irq[DATA_UNCORR]) {
+ add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH);
+ add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW);
+ pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l);
+ /* Reading this register clears the DataFail interrupt sig */
+ readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT);
+ atomic_notifier_call_chain(&ccache_err_chain,
+ SIFIVE_CCACHE_ERR_TYPE_UE,
+ "DatECCFail");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init sifive_ccache_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int i, rc, intr_num;
+
+ np = of_find_matching_node(NULL, sifive_ccache_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ ccache_base = ioremap(res.start, resource_size(&res));
+ if (!ccache_base)
+ return -ENOMEM;
+
+ if (of_property_read_u32(np, "cache-level", &level))
+ return -ENOENT;
+
+ intr_num = of_property_count_u32_elems(np, "interrupts");
+ if (!intr_num) {
+ pr_err("No interrupts property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < intr_num; i++) {
+ g_irq[i] = irq_of_parse_and_map(np, i);
+ rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
+ NULL);
+ if (rc) {
+ pr_err("Could not request IRQ %d\n", g_irq[i]);
+ return rc;
+ }
+ }
+
+ ccache_config_read();
+
+ ccache_cache_ops.get_priv_group = ccache_get_priv_group;
+ riscv_set_cacheinfo_ops(&ccache_cache_ops);
+
+#ifdef CONFIG_DEBUG_FS
+ setup_sifive_debug();
+#endif
+ return 0;
+}
+
+device_initcall(sifive_ccache_init);
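Besides the rename, ccache_config_read() in the new file swaps open-coded mask-and-shift arithmetic for GENMASK()/FIELD_GET(). A self-contained sketch of that idiom against a hypothetical config word (DEMO_* names are illustrative only):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    /* Hypothetical config word layout, mirroring the style above. */
    #define DEMO_CFG_BANKS  GENMASK(7, 0)
    #define DEMO_CFG_WAYS   GENMASK(15, 8)

    static void demo_decode_config(u32 cfg)
    {
            /* Old style: (cfg & 0xFF00) >> 8; FIELD_GET() derives the shift. */
            pr_info("%lu banks, %lu ways\n",
                    FIELD_GET(DEMO_CFG_BANKS, cfg),
                    FIELD_GET(DEMO_CFG_WAYS, cfg));
    }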
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
deleted file mode 100644
index 59640a1d0b28..000000000000
--- a/drivers/soc/sifive/sifive_l2_cache.c
+++ /dev/null
@@ -1,237 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SiFive L2 cache controller Driver
- *
- * Copyright (C) 2018-2019 SiFive, Inc.
- *
- */
-#include <linux/debugfs.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/device.h>
-#include <asm/cacheinfo.h>
-#include <soc/sifive/sifive_l2_cache.h>
-
-#define SIFIVE_L2_DIRECCFIX_LOW 0x100
-#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
-#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
-
-#define SIFIVE_L2_DIRECCFAIL_LOW 0x120
-#define SIFIVE_L2_DIRECCFAIL_HIGH 0x124
-#define SIFIVE_L2_DIRECCFAIL_COUNT 0x128
-
-#define SIFIVE_L2_DATECCFIX_LOW 0x140
-#define SIFIVE_L2_DATECCFIX_HIGH 0x144
-#define SIFIVE_L2_DATECCFIX_COUNT 0x148
-
-#define SIFIVE_L2_DATECCFAIL_LOW 0x160
-#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
-#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
-
-#define SIFIVE_L2_CONFIG 0x00
-#define SIFIVE_L2_WAYENABLE 0x08
-#define SIFIVE_L2_ECCINJECTERR 0x40
-
-#define SIFIVE_L2_MAX_ECCINTR 4
-
-static void __iomem *l2_base;
-static int g_irq[SIFIVE_L2_MAX_ECCINTR];
-static struct riscv_cacheinfo_ops l2_cache_ops;
-
-enum {
- DIR_CORR = 0,
- DATA_CORR,
- DATA_UNCORR,
- DIR_UNCORR,
-};
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *sifive_test;
-
-static ssize_t l2_write(struct file *file, const char __user *data,
- size_t count, loff_t *ppos)
-{
- unsigned int val;
-
- if (kstrtouint_from_user(data, count, 0, &val))
- return -EINVAL;
- if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
- writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
- else
- return -EINVAL;
- return count;
-}
-
-static const struct file_operations l2_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = l2_write
-};
-
-static void setup_sifive_debug(void)
-{
- sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
-
- debugfs_create_file("sifive_debug_inject_error", 0200,
- sifive_test, NULL, &l2_fops);
-}
-#endif
-
-static void l2_config_read(void)
-{
- u32 regval, val;
-
- regval = readl(l2_base + SIFIVE_L2_CONFIG);
- val = regval & 0xFF;
- pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
- val = (regval & 0xFF00) >> 8;
- pr_info("L2CACHE: No. of ways per bank: %d\n", val);
- val = (regval & 0xFF0000) >> 16;
- pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
- val = (regval & 0xFF000000) >> 24;
- pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
-
- regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
- pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
-}
-
-static const struct of_device_id sifive_l2_ids[] = {
- { .compatible = "sifive,fu540-c000-ccache" },
- { .compatible = "sifive,fu740-c000-ccache" },
- { /* end of table */ },
-};
-
-static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
-
-int register_sifive_l2_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&l2_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
-
-int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&l2_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
-
-static int l2_largest_wayenabled(void)
-{
- return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF;
-}
-
-static ssize_t number_of_ways_enabled_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%u\n", l2_largest_wayenabled());
-}
-
-static DEVICE_ATTR_RO(number_of_ways_enabled);
-
-static struct attribute *priv_attrs[] = {
- &dev_attr_number_of_ways_enabled.attr,
- NULL,
-};
-
-static const struct attribute_group priv_attr_group = {
- .attrs = priv_attrs,
-};
-
-static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_leaf)
-{
- /* We want to use private group for L2 cache only */
- if (this_leaf->level == 2)
- return &priv_attr_group;
- else
- return NULL;
-}
-
-static irqreturn_t l2_int_handler(int irq, void *device)
-{
- unsigned int add_h, add_l;
-
- if (irq == g_irq[DIR_CORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
- pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DirError interrupt sig */
- readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
- "DirECCFix");
- }
- if (irq == g_irq[DIR_UNCORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DIRECCFAIL_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DIRECCFAIL_LOW);
- /* Reading this register clears the DirFail interrupt sig */
- readl(l2_base + SIFIVE_L2_DIRECCFAIL_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
- "DirECCFail");
- panic("L2CACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l);
- }
- if (irq == g_irq[DATA_CORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
- pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataError interrupt sig */
- readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
- "DatECCFix");
- }
- if (irq == g_irq[DATA_UNCORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
- pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataFail interrupt sig */
- readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
- "DatECCFail");
- }
-
- return IRQ_HANDLED;
-}
-
-static int __init sifive_l2_init(void)
-{
- struct device_node *np;
- struct resource res;
- int i, rc, intr_num;
-
- np = of_find_matching_node(NULL, sifive_l2_ids);
- if (!np)
- return -ENODEV;
-
- if (of_address_to_resource(np, 0, &res))
- return -ENODEV;
-
- l2_base = ioremap(res.start, resource_size(&res));
- if (!l2_base)
- return -ENOMEM;
-
- intr_num = of_property_count_u32_elems(np, "interrupts");
- if (!intr_num) {
- pr_err("L2CACHE: no interrupts property\n");
- return -ENODEV;
- }
-
- for (i = 0; i < intr_num; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
- rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
- if (rc) {
- pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
- return rc;
- }
- }
-
- l2_config_read();
-
- l2_cache_ops.get_priv_group = l2_get_priv_group;
- riscv_set_cacheinfo_ops(&l2_cache_ops);
-
-#ifdef CONFIG_DEBUG_FS
- setup_sifive_debug();
-#endif
- return 0;
-}
-device_initcall(sifive_l2_init);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 3336d2b78bf7..d9204c590d9a 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1202,7 +1202,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
opt2 |= T5_ISS_F;
- rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
opt2 |= T5_OPT_2_VALID_F;
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index bbb248a2686f..f00b2f62d8e3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -2437,7 +2437,7 @@ int tb_xdomain_init(void)
tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
- xdomain_property_block_gen = prandom_u32();
+ xdomain_property_block_gen = get_random_u32();
return 0;
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 13cdd9def087..434f83168546 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -603,21 +603,6 @@ config SERIAL_MUX_CONSOLE
select SERIAL_CORE_CONSOLE
default y
-config PDC_CONSOLE
- bool "PDC software console support"
- depends on PARISC && !SERIAL_MUX && VT
- help
- Saying Y here will enable the software based PDC console to be
- used as the system console. This is useful for machines in
- which the hardware based console has not been written yet. The
- following steps must be completed to use the PDC console:
-
- 1. create the device entry (mknod /dev/ttyB0 c 11 0)
- 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
- 3. Add device ttyB0 to /etc/securetty (if you want to log on as
- root on this console.)
- 4. Change the kernel command console parameter to: console=ttyB0
-
config SERIAL_SUNSAB
tristate "Sun Siemens SAB82532 serial support"
depends on SPARC && PCI
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 38a861e22c33..7753e586e65a 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1298,7 +1298,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
/* limit fbsize to max visible screen size */
if (fix->smem_len > yres*fix->line_length)
- fix->smem_len = yres*fix->line_length;
+ fix->smem_len = ALIGN(yres*fix->line_length, 4*1024*1024);
fix->accel = FB_ACCEL_NONE;
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index fd5d701106e1..00d789b6c0fa 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -167,7 +167,7 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
memcpy(&m->id, &uvesafb_cn_id, sizeof(m->id));
m->seq = seq;
m->len = len;
- m->ack = prandom_u32();
+ m->ack = get_random_u32();
/* uvesafb_task structure */
memcpy(m + 1, &task->t, sizeof(task->t));
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index dce3a16996b9..4ec18ceb2f21 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -138,6 +138,7 @@ struct share_check {
u64 root_objectid;
u64 inum;
int share_count;
+ bool have_delayed_delete_refs;
};
static inline int extent_is_shared(struct share_check *sc)
@@ -820,16 +821,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
struct preftrees *preftrees, struct share_check *sc)
{
struct btrfs_delayed_ref_node *node;
- struct btrfs_delayed_extent_op *extent_op = head->extent_op;
struct btrfs_key key;
- struct btrfs_key tmp_op_key;
struct rb_node *n;
int count;
int ret = 0;
- if (extent_op && extent_op->update_key)
- btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
-
spin_lock(&head->lock);
for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
node = rb_entry(n, struct btrfs_delayed_ref_node,
@@ -855,10 +851,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
case BTRFS_TREE_BLOCK_REF_KEY: {
/* NORMAL INDIRECT METADATA backref */
struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_key *key_ptr = NULL;
+
+ if (head->extent_op && head->extent_op->update_key) {
+ btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
+ key_ptr = &key;
+ }
ref = btrfs_delayed_node_to_tree_ref(node);
ret = add_indirect_ref(fs_info, preftrees, ref->root,
- &tmp_op_key, ref->level + 1,
+ key_ptr, ref->level + 1,
node->bytenr, count, sc,
GFP_ATOMIC);
break;
@@ -884,13 +886,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
key.offset = ref->offset;
/*
- * Found a inum that doesn't match our known inum, we
- * know it's shared.
+ * If we have a share check context and a reference for
+ * another inode, we can't exit immediately. This is
+ * because even if this is a BTRFS_ADD_DELAYED_REF
+ * reference, we may next find a BTRFS_DROP_DELAYED_REF
+ * which cancels out this ADD reference.
+ *
+ * If this is a DROP reference and there was no previous
+ * ADD reference, then we need to signal that when we
+ * process references from the extent tree (through
+ * add_inline_refs() and add_keyed_refs()), we should
+ * not exit early if we find a reference for another
+ * inode, because one of the delayed DROP references
+ * may cancel that reference in the extent tree.
*/
- if (sc && sc->inum && ref->objectid != sc->inum) {
- ret = BACKREF_FOUND_SHARED;
- goto out;
- }
+ if (sc && count < 0)
+ sc->have_delayed_delete_refs = true;
ret = add_indirect_ref(fs_info, preftrees, ref->root,
&key, 0, node->bytenr, count, sc,
@@ -920,7 +931,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
}
if (!ret)
ret = extent_is_shared(sc);
-out:
+
spin_unlock(&head->lock);
return ret;
}
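The new comment in add_delayed_refs() hinges on delayed references carrying a signed count, so a later DROP can cancel an earlier ADD for the same inode; only a surviving net delete needs the have_delayed_delete_refs escape hatch. A toy illustration of that net-count idea (all demo_* names are hypothetical; the real code threads the counts through its preftrees):

    /* Toy model; the enum and struct are hypothetical, not btrfs types. */
    enum demo_action { DEMO_ADD_REF, DEMO_DROP_REF };

    struct demo_delayed_ref {
            enum demo_action action;
            int ref_mod;    /* how many references this node adds or drops */
    };

    static int demo_net_count(const struct demo_delayed_ref *refs, int n)
    {
            int net = 0;

            for (int i = 0; i < n; i++) {
                    if (refs[i].action == DEMO_ADD_REF)
                            net += refs[i].ref_mod;
                    else
                            net -= refs[i].ref_mod; /* a DROP cancels a prior ADD */
            }
            /*
             * net < 0: a delete survives the cancellation — the analogue of
             * setting have_delayed_delete_refs in the real code.
             */
            return net;
    }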
@@ -1023,7 +1034,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
- if (sc && sc->inum && key.objectid != sc->inum) {
+ if (sc && sc->inum && key.objectid != sc->inum &&
+ !sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
@@ -1033,6 +1045,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
ret = add_indirect_ref(fs_info, preftrees, root,
&key, 0, bytenr, count,
sc, GFP_NOFS);
+
break;
}
default:
@@ -1122,7 +1135,8 @@ static int add_keyed_refs(struct btrfs_root *extent_root,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = btrfs_extent_data_ref_offset(leaf, dref);
- if (sc && sc->inum && key.objectid != sc->inum) {
+ if (sc && sc->inum && key.objectid != sc->inum &&
+ !sc->have_delayed_delete_refs) {
ret = BACKREF_FOUND_SHARED;
break;
}
@@ -1522,6 +1536,9 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache
{
struct btrfs_backref_shared_cache_entry *entry;
+ if (!cache->use_cache)
+ return false;
+
if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
return false;
@@ -1557,6 +1574,19 @@ static bool lookup_backref_shared_cache(struct btrfs_backref_shared_cache *cache
return false;
*is_shared = entry->is_shared;
+ /*
+ * If the node at this level is shared, then all nodes below it are also
+ * shared. Some of those nodes may currently be marked as not shared
+ * because we have just switched from one leaf to another, which also
+ * switched other nodes above the leaf and below the current level, so
+ * mark them as shared.
+ */
+ if (*is_shared) {
+ for (int i = 0; i < level; i++) {
+ cache->entries[i].is_shared = true;
+ cache->entries[i].gen = entry->gen;
+ }
+ }
return true;
}
@@ -1573,6 +1603,9 @@ static void store_backref_shared_cache(struct btrfs_backref_shared_cache *cache,
struct btrfs_backref_shared_cache_entry *entry;
u64 gen;
+ if (!cache->use_cache)
+ return;
+
if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
return;
@@ -1648,6 +1681,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
.root_objectid = root->root_key.objectid,
.inum = inum,
.share_count = 0,
+ .have_delayed_delete_refs = false,
};
int level;
@@ -1669,6 +1703,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
/* -1 means we are in the bytenr of the data extent. */
level = -1;
ULIST_ITER_INIT(&uiter);
+ cache->use_cache = true;
while (1) {
bool is_shared;
bool cached;
@@ -1698,6 +1733,24 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
extent_gen > btrfs_root_last_snapshot(&root->root_item))
break;
+ /*
+ * If our data extent was not directly shared (without multiple
+ * reference items), then it might have a single reference item
+ * with a count > 1 for the same offset, which means there are 2
+ * (or more) file extent items that point to the data extent -
+ * this happens when a file extent item needs to be split and
+ * then one item gets moved to another leaf due to a b+tree leaf
+ * split when inserting some item. In this case the file extent
+ * items may be located in different leaves and therefore some
+ * of the leaves may be referenced through shared subtrees while
+ * others are not. Since our extent buffer cache only works for
+ * a single path (by far the most common case and simpler to
+ * deal with), we can not use it if we have multiple leaves
+ * (which implies multiple paths).
+ */
+ if (level == -1 && tmp->nnodes > 1)
+ cache->use_cache = false;
+
if (level >= 0)
store_backref_shared_cache(cache, root, bytenr,
level, false);
@@ -1713,6 +1766,7 @@ int btrfs_is_data_extent_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
break;
}
shared.share_count = 0;
+ shared.have_delayed_delete_refs = false;
cond_resched();
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 52ae6957b414..8e69584d538d 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -29,6 +29,7 @@ struct btrfs_backref_shared_cache {
* a given data extent should never exceed the maximum b+tree height.
*/
struct btrfs_backref_shared_cache_entry entries[BTRFS_MAX_LEVEL];
+ bool use_cache;
};
typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 32c415cfbdfe..deebc8ddbd93 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -774,10 +774,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
- /* REVIEW */
if (wait && caching_ctl)
ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
- /* wait_event(caching_ctl->wait, space_cache_v1_done(cache)); */
if (caching_ctl)
btrfs_put_caching_control(caching_ctl);
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index 618275af19c4..83cb0378096f 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -1641,16 +1641,17 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
int err;
u64 failed_start;
- while (1) {
+ err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
+ cached_state, NULL, GFP_NOFS);
+ while (err == -EEXIST) {
+ if (failed_start != start)
+ clear_extent_bit(tree, start, failed_start - 1,
+ EXTENT_LOCKED, cached_state);
+
+ wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
&failed_start, cached_state, NULL,
GFP_NOFS);
- if (err == -EEXIST) {
- wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
- start = failed_start;
- } else
- break;
- WARN_ON(start > end);
}
return err;
}
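The reworked lock_extent() above hoists the first attempt out of the loop and, on -EEXIST, first unlocks whatever prefix it did manage to lock before waiting and retrying the full range. A generic sketch of that retry shape, with hypothetical try/undo/wait primitives standing in for the extent-io-tree helpers:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical primitives standing in for the extent-io-tree helpers. */
    int try_lock_range(u64 start, u64 end, u64 *failed_start);
    void unlock_range(u64 start, u64 end);
    void wait_range_unlocked(u64 start, u64 end);

    static int demo_lock_range(u64 start, u64 end)
    {
            u64 failed_start;
            int err;

            err = try_lock_range(start, end, &failed_start);
            while (err == -EEXIST) {
                    /* Undo the prefix we did manage to lock before waiting. */
                    if (failed_start != start)
                            unlock_range(start, failed_start - 1);

                    wait_range_unlocked(failed_start, end);
                    err = try_lock_range(start, end, &failed_start);
            }
            return err;
    }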
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 4ef4167072b8..ec6e1752af2c 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -348,6 +348,7 @@ static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
switch (sctx->proto) {
case 1: return cmd <= BTRFS_SEND_C_MAX_V1;
case 2: return cmd <= BTRFS_SEND_C_MAX_V2;
+ case 3: return cmd <= BTRFS_SEND_C_MAX_V3;
default: return false;
}
}
@@ -6469,7 +6470,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
if (ret < 0)
goto out;
}
- if (sctx->cur_inode_needs_verity) {
+
+ if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY) &&
+ sctx->cur_inode_needs_verity) {
ret = process_verity(sctx);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 0a4537775e0c..f7585cfa7e52 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -10,7 +10,12 @@
#include <linux/types.h>
#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
+/* Conditional support for the upcoming protocol version. */
+#ifdef CONFIG_BTRFS_DEBUG
+#define BTRFS_SEND_STREAM_VERSION 3
+#else
#define BTRFS_SEND_STREAM_VERSION 2
+#endif
/*
* In send stream v1, no command is larger than 64K. In send stream v2, no limit
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9ebb7cee7978..4af5e55abc15 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -362,7 +362,7 @@ static int ceph_fill_fragtree(struct inode *inode,
if (nsplits != ci->i_fragtree_nsplits) {
update = true;
} else if (nsplits) {
- i = prandom_u32() % nsplits;
+ i = prandom_u32_max(nsplits);
id = le32_to_cpu(fragtree->splits[i].frag);
if (!__ceph_find_frag(ci, id))
update = true;
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 8d0a6d2c2da4..3fbabc98e1f7 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -29,7 +29,7 @@ static int __mdsmap_get_random_mds(struct ceph_mdsmap *m, bool ignore_laggy)
return -1;
/* pick */
- n = prandom_u32() % n;
+ n = prandom_u32_max(n);
for (j = 0, i = 0; i < m->possible_max_rank; i++) {
if (CEPH_MDS_IS_READY(i, ignore_laggy))
j++;
diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
index b705dac383f9..fe88b67c863f 100644
--- a/fs/cifs/cached_dir.c
+++ b/fs/cifs/cached_dir.c
@@ -5,13 +5,98 @@
* Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
*/
+#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"
-struct cached_fid *init_cached_dir(const char *path);
+static struct cached_fid *init_cached_dir(const char *path);
+static void free_cached_dir(struct cached_fid *cfid);
+
+static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ const char *path,
+ bool lookup_only)
+{
+ struct cached_fid *cfid;
+
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry(cfid, &cfids->entries, entry) {
+ if (!strcmp(cfid->path, path)) {
+ /*
+ * If it doesn't have a lease it is either not yet
+ * fully cached or it may be in the process of
+ * being deleted due to a lease break.
+ */
+ if (!cfid->has_lease) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+ kref_get(&cfid->refcount);
+ spin_unlock(&cfids->cfid_list_lock);
+ return cfid;
+ }
+ }
+ if (lookup_only) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+ if (cfids->num_entries >= MAX_CACHED_FIDS) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+ cfid = init_cached_dir(path);
+ if (cfid == NULL) {
+ spin_unlock(&cfids->cfid_list_lock);
+ return NULL;
+ }
+ cfid->cfids = cfids;
+ cfids->num_entries++;
+ list_add(&cfid->entry, &cfids->entries);
+ cfid->on_list = true;
+ kref_get(&cfid->refcount);
+ spin_unlock(&cfids->cfid_list_lock);
+ return cfid;
+}
+
+static struct dentry *
+path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
+{
+ struct dentry *dentry;
+ const char *s, *p;
+ char sep;
+
+ sep = CIFS_DIR_SEP(cifs_sb);
+ dentry = dget(cifs_sb->root);
+ s = path;
+
+ do {
+ struct inode *dir = d_inode(dentry);
+ struct dentry *child;
+
+ if (!S_ISDIR(dir->i_mode)) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOTDIR);
+ break;
+ }
+
+ /* skip separators */
+ while (*s == sep)
+ s++;
+ if (!*s)
+ break;
+ p = s++;
+ /* next separator */
+ while (*s && *s != sep)
+ s++;
+
+ child = lookup_positive_unlocked(p, dentry, s - p);
+ dput(dentry);
+ dentry = child;
+ } while (!IS_ERR(dentry));
+ return dentry;
+}
/*
* Open the and cache a directory handle.
@@ -33,61 +118,57 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
struct kvec qi_iov[1];
int rc, flags = 0;
- __le16 utf16_path = 0; /* Null - since an open of top of share */
+ __le16 *utf16_path = NULL;
u8 oplock = SMB2_OPLOCK_LEVEL_II;
struct cifs_fid *pfid;
- struct dentry *dentry;
+ struct dentry *dentry = NULL;
struct cached_fid *cfid;
+ struct cached_fids *cfids;
- if (tcon == NULL || tcon->nohandlecache ||
+ if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
is_smb1_server(tcon->ses->server))
return -EOPNOTSUPP;
ses = tcon->ses;
server = ses->server;
+ cfids = tcon->cfids;
+
+ if (!server->ops->new_lease_key)
+ return -EIO;
if (cifs_sb->root == NULL)
return -ENOENT;
- if (!path[0])
- dentry = cifs_sb->root;
- else
- return -ENOENT;
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
- cfid = tcon->cfids->cfid;
+ cfid = find_or_create_cached_dir(cfids, path, lookup_only);
if (cfid == NULL) {
- cfid = init_cached_dir(path);
- tcon->cfids->cfid = cfid;
+ kfree(utf16_path);
+ return -ENOENT;
}
- if (cfid == NULL)
- return -ENOMEM;
-
- mutex_lock(&cfid->fid_mutex);
- if (cfid->is_valid) {
- cifs_dbg(FYI, "found a cached root file handle\n");
+ /*
+ * At this point we either already have a lease, in which case we can
+ * just return it, or we are guaranteed to be the only thread accessing
+ * this cfid.
+ */
+ if (cfid->has_lease) {
*ret_cfid = cfid;
- kref_get(&cfid->refcount);
- mutex_unlock(&cfid->fid_mutex);
+ kfree(utf16_path);
return 0;
}
/*
* We do not hold the lock for the open because in case
- * SMB2_open needs to reconnect, it will end up calling
- * cifs_mark_open_files_invalid() which takes the lock again
- * thus causing a deadlock
+ * SMB2_open needs to reconnect.
+ * This is safe because no other thread will be able to get a ref
+ * to the cfid until we have finished opening the file and (possibly)
+ * acquired a lease.
*/
- mutex_unlock(&cfid->fid_mutex);
-
- if (lookup_only)
- return -ENOENT;
-
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- if (!server->ops->new_lease_key)
- return -EIO;
-
pfid = &cfid->fid;
server->ops->new_lease_key(pfid);
@@ -108,7 +189,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
oparms.reconnect = false;
rc = SMB2_open_init(tcon, server,
- &rqst[0], &oplock, &oparms, &utf16_path);
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto oshr_free;
smb2_set_next_command(tcon, &rqst[0]);
@@ -131,47 +212,13 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
rc = compound_send_recv(xid, ses, server,
flags, 2, rqst,
resp_buftype, rsp_iov);
- mutex_lock(&cfid->fid_mutex);
-
- /*
- * Now we need to check again as the cached root might have
- * been successfully re-opened from a concurrent process
- */
-
- if (cfid->is_valid) {
- /* work was already done */
-
- /* stash fids for close() later */
- struct cifs_fid fid = {
- .persistent_fid = pfid->persistent_fid,
- .volatile_fid = pfid->volatile_fid,
- };
-
- /*
- * caller expects this func to set the fid in cfid to valid
- * cached root, so increment the refcount.
- */
- kref_get(&cfid->refcount);
-
- mutex_unlock(&cfid->fid_mutex);
-
- if (rc == 0) {
- /* close extra handle outside of crit sec */
- SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
- }
- rc = 0;
- goto oshr_free;
- }
-
- /* Cached root is still invalid, continue normaly */
-
if (rc) {
if (rc == -EREMCHG) {
tcon->need_reconnect = true;
pr_warn_once("server share %s deleted\n",
tcon->tree_name);
}
- goto oshr_exit;
+ goto oshr_free;
}
atomic_inc(&tcon->num_remote_opens);
@@ -183,31 +230,18 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */
- cfid->tcon = tcon;
- cfid->is_valid = true;
- cfid->dentry = dentry;
- if (dentry)
- dget(dentry);
- kref_init(&cfid->refcount);
+ if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ goto oshr_free;
- /* BB TBD check to see if oplock level check can be removed below */
- if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
- /*
- * See commit 2f94a3125b87. Increment the refcount when we
- * get a lease for root, release it if lease break occurs
- */
- kref_get(&cfid->refcount);
- cfid->has_lease = true;
- smb2_parse_contexts(server, o_rsp,
- &oparms.fid->epoch,
- oparms.fid->lease_key, &oplock,
- NULL, NULL);
- } else
- goto oshr_exit;
+
+ smb2_parse_contexts(server, o_rsp,
+ &oparms.fid->epoch,
+ oparms.fid->lease_key, &oplock,
+ NULL, NULL);
qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
- goto oshr_exit;
+ goto oshr_free;
if (!smb2_validate_and_copy_iov(
le16_to_cpu(qi_rsp->OutputBufferOffset),
sizeof(struct smb2_file_all_info),
@@ -215,15 +249,40 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
(char *)&cfid->file_all_info))
cfid->file_all_info_is_valid = true;
+ if (!path[0])
+ dentry = dget(cifs_sb->root);
+ else {
+ dentry = path_to_dentry(cifs_sb, path);
+ if (IS_ERR(dentry))
+ goto oshr_free;
+ }
+ cfid->dentry = dentry;
+ cfid->tcon = tcon;
cfid->time = jiffies;
+ cfid->is_open = true;
+ cfid->has_lease = true;
-oshr_exit:
- mutex_unlock(&cfid->fid_mutex);
oshr_free:
+ kfree(utf16_path);
SMB2_open_free(&rqst[0]);
SMB2_query_info_free(&rqst[1]);
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ spin_lock(&cfids->cfid_list_lock);
+ if (!cfid->has_lease) {
+ if (cfid->on_list) {
+ list_del(&cfid->entry);
+ cfid->on_list = false;
+ cfids->num_entries--;
+ }
+ rc = -ENOENT;
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+ if (rc) {
+ free_cached_dir(cfid);
+ cfid = NULL;
+ }
+
if (rc == 0)
*ret_cfid = cfid;
@@ -235,20 +294,22 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
struct cached_fid **ret_cfid)
{
struct cached_fid *cfid;
+ struct cached_fids *cfids = tcon->cfids;
- cfid = tcon->cfids->cfid;
- if (cfid == NULL)
+ if (cfids == NULL)
return -ENOENT;
- mutex_lock(&cfid->fid_mutex);
- if (cfid->dentry == dentry) {
- cifs_dbg(FYI, "found a cached root file handle by dentry\n");
- *ret_cfid = cfid;
- kref_get(&cfid->refcount);
- mutex_unlock(&cfid->fid_mutex);
- return 0;
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry(cfid, &cfids->entries, entry) {
+ if (dentry && cfid->dentry == dentry) {
+ cifs_dbg(FYI, "found a cached root file handle by dentry\n");
+ kref_get(&cfid->refcount);
+ *ret_cfid = cfid;
+ spin_unlock(&cfids->cfid_list_lock);
+ return 0;
+ }
}
- mutex_unlock(&cfid->fid_mutex);
+ spin_unlock(&cfids->cfid_list_lock);
return -ENOENT;
}
@@ -257,63 +318,29 @@ smb2_close_cached_fid(struct kref *ref)
{
struct cached_fid *cfid = container_of(ref, struct cached_fid,
refcount);
- struct cached_dirent *dirent, *q;
- if (cfid->is_valid) {
- cifs_dbg(FYI, "clear cached root file handle\n");
- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
- cfid->fid.volatile_fid);
+ spin_lock(&cfid->cfids->cfid_list_lock);
+ if (cfid->on_list) {
+ list_del(&cfid->entry);
+ cfid->on_list = false;
+ cfid->cfids->num_entries--;
}
+ spin_unlock(&cfid->cfids->cfid_list_lock);
- /*
- * We only check validity above to send SMB2_close,
- * but we still need to invalidate these entries
- * when this function is called
- */
- cfid->is_valid = false;
- cfid->file_all_info_is_valid = false;
- cfid->has_lease = false;
- if (cfid->dentry) {
- dput(cfid->dentry);
- cfid->dentry = NULL;
- }
- /*
- * Delete all cached dirent names
- */
- mutex_lock(&cfid->dirents.de_mutex);
- list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
- list_del(&dirent->entry);
- kfree(dirent->name);
- kfree(dirent);
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
+
+ if (cfid->is_open) {
+ SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid);
}
- cfid->dirents.is_valid = 0;
- cfid->dirents.is_failed = 0;
- cfid->dirents.ctx = NULL;
- cfid->dirents.pos = 0;
- mutex_unlock(&cfid->dirents.de_mutex);
+ free_cached_dir(cfid);
}
void close_cached_dir(struct cached_fid *cfid)
{
- mutex_lock(&cfid->fid_mutex);
kref_put(&cfid->refcount, smb2_close_cached_fid);
- mutex_unlock(&cfid->fid_mutex);
-}
-
-void close_cached_dir_lease_locked(struct cached_fid *cfid)
-{
- if (cfid->has_lease) {
- cfid->has_lease = false;
- kref_put(&cfid->refcount, smb2_close_cached_fid);
- }
-}
-
-void close_cached_dir_lease(struct cached_fid *cfid)
-{
- mutex_lock(&cfid->fid_mutex);
- close_cached_dir_lease_locked(cfid);
- mutex_unlock(&cfid->fid_mutex);
}
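/*
 * Editor's sketch (not part of the patch): with fid_mutex gone,
 * close_cached_dir() is a bare kref_put() and lifetime is driven
 * entirely by the refcount -- the release callback runs exactly once,
 * when the last reference drops, so no lock is needed around the put.
 * A minimal stand-alone form of the idiom, with hypothetical names
 * (assumes <linux/kref.h> and <linux/slab.h>):
 */
struct obj {
	struct kref refcount;
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, refcount);

	kfree(o);		/* reached only by the final kref_put() */
}

static inline void obj_put(struct obj *o)
{
	kref_put(&o->refcount, obj_release);
}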
/*
@@ -326,41 +353,62 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
struct cached_fid *cfid;
struct cifs_tcon *tcon;
struct tcon_link *tlink;
+ struct cached_fids *cfids;
for (node = rb_first(root); node; node = rb_next(node)) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
tcon = tlink_tcon(tlink);
if (IS_ERR(tcon))
continue;
- cfid = tcon->cfids->cfid;
- if (cfid == NULL)
+ cfids = tcon->cfids;
+ if (cfids == NULL)
continue;
- mutex_lock(&cfid->fid_mutex);
- if (cfid->dentry) {
+ list_for_each_entry(cfid, &cfids->entries, entry) {
dput(cfid->dentry);
cfid->dentry = NULL;
}
- mutex_unlock(&cfid->fid_mutex);
}
}
/*
- * Invalidate and close all cached dirs when a TCON has been reset
+ * Invalidate all cached dirs when a TCON has been reset
* due to a session loss.
*/
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
- struct cached_fid *cfid = tcon->cfids->cfid;
-
- if (cfid == NULL)
- return;
-
- mutex_lock(&cfid->fid_mutex);
- cfid->is_valid = false;
- /* cached handle is not valid, so SMB2_CLOSE won't be sent below */
- close_cached_dir_lease_locked(cfid);
- memset(&cfid->fid, 0, sizeof(struct cifs_fid));
- mutex_unlock(&cfid->fid_mutex);
+ struct cached_fids *cfids = tcon->cfids;
+ struct cached_fid *cfid, *q;
+ struct list_head entry;
+
+ INIT_LIST_HEAD(&entry);
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ list_del(&cfid->entry);
+ list_add(&cfid->entry, &entry);
+ cfids->num_entries--;
+ cfid->is_open = false;
+ /* To prevent race with smb2_cached_lease_break() */
+ kref_get(&cfid->refcount);
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ cfid->on_list = false;
+ list_del(&cfid->entry);
+ cancel_work_sync(&cfid->lease_break);
+ if (cfid->has_lease) {
+ /*
+ * The lease was never cancelled from the server, so we
+ * need to drop the reference.
+ */
+ spin_lock(&cfids->cfid_list_lock);
+ cfid->has_lease = false;
+ spin_unlock(&cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
+ /* Drop the extra reference opened above */
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
+ }
}
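/*
 * Editor's sketch (illustration, not part of the patch): the function
 * above is the classic two-phase teardown -- entries are moved to a
 * private list under the spinlock, then the sleeping work
 * (cancel_work_sync(), kref_put() -> SMB2_close()) happens outside it.
 * A condensed generic form of the same pattern, reusing the field
 * names from this file:
 */
static void drain_cached_fids(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(tmp);

	spin_lock(&cfids->cfid_list_lock);
	list_splice_init(&cfids->entries, &tmp);	/* steal the list atomically */
	cfids->num_entries = 0;
	spin_unlock(&cfids->cfid_list_lock);

	/* nothing else can reach these entries now; sleeping is fine */
	list_for_each_entry_safe(cfid, q, &tmp, entry) {
		list_del(&cfid->entry);
		cancel_work_sync(&cfid->lease_break);
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}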
static void
@@ -369,51 +417,83 @@ smb2_cached_lease_break(struct work_struct *work)
struct cached_fid *cfid = container_of(work,
struct cached_fid, lease_break);
- close_cached_dir_lease(cfid);
+ spin_lock(&cfid->cfids->cfid_list_lock);
+ cfid->has_lease = false;
+ spin_unlock(&cfid->cfids->cfid_list_lock);
+ kref_put(&cfid->refcount, smb2_close_cached_fid);
}
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
- struct cached_fid *cfid = tcon->cfids->cfid;
+ struct cached_fids *cfids = tcon->cfids;
+ struct cached_fid *cfid;
- if (cfid == NULL)
+ if (cfids == NULL)
return false;
- if (cfid->is_valid &&
- !memcmp(lease_key,
- cfid->fid.lease_key,
- SMB2_LEASE_KEY_SIZE)) {
- cfid->time = 0;
- INIT_WORK(&cfid->lease_break,
- smb2_cached_lease_break);
- queue_work(cifsiod_wq,
- &cfid->lease_break);
- return true;
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry(cfid, &cfids->entries, entry) {
+ if (cfid->has_lease &&
+ !memcmp(lease_key,
+ cfid->fid.lease_key,
+ SMB2_LEASE_KEY_SIZE)) {
+ cfid->time = 0;
+ /*
+ * We found a lease; remove it from the list
+ * so no other threads can access it.
+ */
+ list_del(&cfid->entry);
+ cfid->on_list = false;
+ cfids->num_entries--;
+
+ queue_work(cifsiod_wq,
+ &cfid->lease_break);
+ spin_unlock(&cfids->cfid_list_lock);
+ return true;
+ }
}
+ spin_unlock(&cfids->cfid_list_lock);
return false;
}
-struct cached_fid *init_cached_dir(const char *path)
+static struct cached_fid *init_cached_dir(const char *path)
{
struct cached_fid *cfid;
- cfid = kzalloc(sizeof(*cfid), GFP_KERNEL);
+ cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
if (!cfid)
return NULL;
- cfid->path = kstrdup(path, GFP_KERNEL);
+ cfid->path = kstrdup(path, GFP_ATOMIC);
if (!cfid->path) {
kfree(cfid);
return NULL;
}
+ INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
+ INIT_LIST_HEAD(&cfid->entry);
INIT_LIST_HEAD(&cfid->dirents.entries);
mutex_init(&cfid->dirents.de_mutex);
- mutex_init(&cfid->fid_mutex);
+ spin_lock_init(&cfid->fid_lock);
+ kref_init(&cfid->refcount);
return cfid;
}
-void free_cached_dir(struct cached_fid *cfid)
+static void free_cached_dir(struct cached_fid *cfid)
{
+ struct cached_dirent *dirent, *q;
+
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
+
+ /*
+ * Delete all cached dirent names
+ */
+ list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
+ list_del(&dirent->entry);
+ kfree(dirent->name);
+ kfree(dirent);
+ }
+
kfree(cfid->path);
cfid->path = NULL;
kfree(cfid);
@@ -426,15 +506,34 @@ struct cached_fids *init_cached_dirs(void)
cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
if (!cfids)
return NULL;
- mutex_init(&cfids->cfid_list_mutex);
+ spin_lock_init(&cfids->cfid_list_lock);
+ INIT_LIST_HEAD(&cfids->entries);
return cfids;
}
+/*
+ * Called from tconInfoFree when we are tearing down the tcon.
+ * There are no active users or open files/directories at this point.
+ */
void free_cached_dirs(struct cached_fids *cfids)
{
- if (cfids->cfid) {
- free_cached_dir(cfids->cfid);
- cfids->cfid = NULL;
+ struct cached_fid *cfid, *q;
+ struct list_head entry;
+
+ INIT_LIST_HEAD(&entry);
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+ cfid->on_list = false;
+ cfid->is_open = false;
+ list_del(&cfid->entry);
+ list_add(&cfid->entry, &entry);
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+ list_for_each_entry_safe(cfid, q, &entry, entry) {
+ list_del(&cfid->entry);
+ free_cached_dir(cfid);
}
+
kfree(cfids);
}
diff --git a/fs/cifs/cached_dir.h b/fs/cifs/cached_dir.h
index bdf6c3866653..e536304ca2ce 100644
--- a/fs/cifs/cached_dir.h
+++ b/fs/cifs/cached_dir.h
@@ -31,14 +31,17 @@ struct cached_dirents {
};
struct cached_fid {
+ struct list_head entry;
+ struct cached_fids *cfids;
const char *path;
- bool is_valid:1; /* Do we have a useable root fid */
- bool file_all_info_is_valid:1;
bool has_lease:1;
+ bool is_open:1;
+ bool on_list:1;
+ bool file_all_info_is_valid:1;
unsigned long time; /* jiffies of when lease was taken */
struct kref refcount;
struct cifs_fid fid;
- struct mutex fid_mutex;
+ spinlock_t fid_lock;
struct cifs_tcon *tcon;
struct dentry *dentry;
struct work_struct lease_break;
@@ -46,9 +49,14 @@ struct cached_fid {
struct cached_dirents dirents;
};
+#define MAX_CACHED_FIDS 16
struct cached_fids {
- struct mutex cfid_list_mutex;
- struct cached_fid *cfid;
+ /* Must be held when:
+ * - accessing the cfids->entries list
+ */
+ spinlock_t cfid_list_lock;
+ int num_entries;
+ struct list_head entries;
};
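/*
 * Editor's sketch (assumption: illustrating how the MAX_CACHED_FIDS
 * bound interacts with cfid_list_lock; the real check presumably lives
 * in open_cached_dir() in cached_dir.c, earlier in the patch).
 * num_entries may only be tested and changed under the same spinlock
 * that protects the list; cfids_try_add() is a hypothetical helper:
 */
static inline bool cfids_try_add(struct cached_fids *cfids,
				 struct cached_fid *cfid)
{
	bool added = false;

	spin_lock(&cfids->cfid_list_lock);
	if (cfids->num_entries < MAX_CACHED_FIDS) {
		list_add(&cfid->entry, &cfids->entries);
		cfid->on_list = true;
		cfids->num_entries++;
		added = true;
	}
	spin_unlock(&cfids->cfid_list_lock);
	return added;
}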
extern struct cached_fids *init_cached_dirs(void);
@@ -61,8 +69,6 @@ extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
struct dentry *dentry,
struct cached_fid **cfid);
extern void close_cached_dir(struct cached_fid *cfid);
-extern void close_cached_dir_lease(struct cached_fid *cfid);
-extern void close_cached_dir_lease_locked(struct cached_fid *cfid);
extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb);
extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon);
extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index b87cbbe6d2d4..d86d78d5bfdc 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -91,6 +91,13 @@ struct smb3_notify {
bool watch_tree;
} __packed;
+struct smb3_notify_info {
+ __u32 completion_filter;
+ bool watch_tree;
+ __u32 data_len; /* size of notify data below */
+ __u8 notify_data[];
+} __packed;
+
#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
@@ -100,6 +107,7 @@ struct smb3_notify {
#define CIFS_DUMP_KEY _IOWR(CIFS_IOCTL_MAGIC, 8, struct smb3_key_debug_info)
#define CIFS_IOC_NOTIFY _IOW(CIFS_IOCTL_MAGIC, 9, struct smb3_notify)
#define CIFS_DUMP_FULL_KEY _IOWR(CIFS_IOCTL_MAGIC, 10, struct smb3_full_key_debug_info)
+#define CIFS_IOC_NOTIFY_INFO _IOWR(CIFS_IOCTL_MAGIC, 11, struct smb3_notify_info)
#define CIFS_IOC_SHUTDOWN _IOR ('X', 125, __u32)
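/*
 * Editor's example (hypothetical userspace usage, not part of the
 * patch; assumes the struct and ioctl definitions above are visible,
 * e.g. copied from this header along with <stdbool.h> and
 * <linux/types.h>, and that data_len names the size of the buffer
 * appended after the header). CIFS_IOC_NOTIFY_INFO behaves like
 * CIFS_IOC_NOTIFY but copies the change details back to the caller in
 * the trailing flexible array:
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001

static int watch_dir_once(const char *path)
{
	struct smb3_notify_info *ni;
	unsigned int buf_len = 4096;
	int fd, rc;

	fd = open(path, O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return -1;

	ni = calloc(1, sizeof(*ni) + buf_len);
	if (!ni) {
		close(fd);
		return -1;
	}
	ni->completion_filter = FILE_NOTIFY_CHANGE_FILE_NAME;
	ni->watch_tree = false;
	ni->data_len = buf_len;

	rc = ioctl(fd, CIFS_IOC_NOTIFY_INFO, ni); /* blocks until a change */
	if (rc == 0)
		printf("%u bytes of FILE_NOTIFY_INFORMATION returned\n",
		       ni->data_len);

	free(ni);
	close(fd);
	return rc;
}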
/*
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8042d7280dec..c6ac19223ddc 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -396,6 +396,7 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode->epoch = 0;
spin_lock_init(&cifs_inode->open_file_lock);
generate_random_uuid(cifs_inode->lease_key);
+ cifs_inode->symlink_target = NULL;
/*
* Can not set i_flags here - they get immediately overwritten to zero
@@ -412,7 +413,11 @@ cifs_alloc_inode(struct super_block *sb)
static void
cifs_free_inode(struct inode *inode)
{
- kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+
+ if (S_ISLNK(inode->i_mode))
+ kfree(cinode->symlink_target);
+ kmem_cache_free(cifs_inode_cachep, cinode);
}
static void
@@ -1139,7 +1144,7 @@ const struct inode_operations cifs_file_inode_ops = {
};
const struct inode_operations cifs_symlink_inode_ops = {
- .get_link = cifs_get_link,
+ .get_link = simple_get_link,
.permission = cifs_permission,
.listxattr = cifs_listxattr,
};
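/*
 * Editor's note (cross-reference, not part of the patch): switching
 * .get_link to simple_get_link() works because simple_get_link() just
 * returns inode->i_link; the fs/cifs/inode.c hunk below makes
 * cifs_fattr_to_inode() stash the queried target in
 * cifs_i->symlink_target and point i_link at it, so readlink no longer
 * needs a round trip into CIFS-specific code.
 */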
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 52ddf4163b98..1420acf987f0 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -185,6 +185,19 @@ struct cifs_cred {
struct cifs_ace *aces;
};
+struct cifs_open_info_data {
+ char *symlink_target;
+ union {
+ struct smb2_file_all_info fi;
+ struct smb311_posix_qinfo posix_fi;
+ };
+};
+
+static inline void cifs_free_open_info(struct cifs_open_info_data *data)
+{
+ kfree(data->symlink_target);
+}
+
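/*
 * Editor's sketch of the intended calling convention (inferred from the
 * call sites later in this patch; example_query_symlink is a
 * hypothetical name): the struct is stack-allocated and
 * zero-initialized, a query op may kstrdup() a symlink target into it,
 * and the caller frees it unconditionally on the way out.
 */
static int example_query_symlink(const unsigned int xid,
				 struct cifs_tcon *tcon,
				 struct cifs_sb_info *cifs_sb,
				 const char *full_path)
{
	struct cifs_open_info_data data = {};
	bool adjust_tz = false, reparse = false;
	int rc;

	rc = tcon->ses->server->ops->query_path_info(xid, tcon, cifs_sb,
						     full_path, &data,
						     &adjust_tz, &reparse);
	if (!rc && data.symlink_target)
		cifs_dbg(FYI, "symlink target: %s\n", data.symlink_target);

	cifs_free_open_info(&data);	/* kfree(NULL) is a no-op, so always safe */
	return rc;
}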
/*
*****************************************************************
* Except the CIFS PDUs themselves all the
@@ -307,20 +320,20 @@ struct smb_version_operations {
int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const char *);
/* query path data from the server */
- int (*query_path_info)(const unsigned int, struct cifs_tcon *,
- struct cifs_sb_info *, const char *,
- FILE_ALL_INFO *, bool *, bool *);
+ int (*query_path_info)(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
/* query file data from the server */
- int (*query_file_info)(const unsigned int, struct cifs_tcon *,
- struct cifs_fid *, FILE_ALL_INFO *);
+ int (*query_file_info)(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile, struct cifs_open_info_data *data);
/* query reparse tag from srv to determine which type of special file */
int (*query_reparse_tag)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *path,
__u32 *reparse_tag);
/* get server index number */
- int (*get_srv_inum)(const unsigned int, struct cifs_tcon *,
- struct cifs_sb_info *, const char *,
- u64 *uniqueid, FILE_ALL_INFO *);
+ int (*get_srv_inum)(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid,
+ struct cifs_open_info_data *data);
/* set size by path */
int (*set_path_size)(const unsigned int, struct cifs_tcon *,
const char *, __u64, struct cifs_sb_info *, bool);
@@ -369,8 +382,8 @@ struct smb_version_operations {
struct cifs_sb_info *, const char *,
char **, bool);
/* open a file for non-posix mounts */
- int (*open)(const unsigned int, struct cifs_open_parms *,
- __u32 *, FILE_ALL_INFO *);
+ int (*open)(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+ void *buf);
/* set fid protocol-specific info */
void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
/* close a file */
@@ -441,7 +454,7 @@ struct smb_version_operations {
int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *src_file, void __user *);
int (*notify)(const unsigned int xid, struct file *pfile,
- void __user *pbuf);
+ void __user *pbuf, bool return_changes);
int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const unsigned char *,
char *, unsigned int *);
@@ -1123,6 +1136,7 @@ struct cifs_fattr {
struct timespec64 cf_mtime;
struct timespec64 cf_ctime;
u32 cf_cifstag;
+ char *cf_symlink_target;
};
/*
@@ -1385,6 +1399,7 @@ struct cifsFileInfo {
struct work_struct put; /* work for the final part of _put */
struct delayed_work deferred;
bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
+ char *symlink_target;
};
struct cifs_io_parms {
@@ -1543,6 +1558,7 @@ struct cifsInodeInfo {
struct list_head deferred_closes; /* list of deferred closes */
spinlock_t deferred_lock; /* protection on deferred list */
bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
+ char *symlink_target;
};
static inline struct cifsInodeInfo *
@@ -2111,4 +2127,14 @@ static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
return sizeof(ses->workstation_name);
}
+static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const FILE_ALL_INFO *src)
+{
+ memcpy(dst, src, (size_t)((u8 *)&src->AccessFlags - (u8 *)src));
+ dst->AccessFlags = src->AccessFlags;
+ dst->CurrentByteOffset = src->CurrentByteOffset;
+ dst->Mode = src->Mode;
+ dst->AlignmentRequirement = src->AlignmentRequirement;
+ dst->FileNameLength = src->FileNameLength;
+}
+
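/*
 * Editor's note (illustration, not part of the patch): the helper above
 * works because FILE_ALL_INFO (SMB1) and struct smb2_file_all_info share
 * an identical layout up to, but not including, AccessFlags -- the SMB1
 * struct carries extra IndexNumber1/IndexNumber2 fields after it -- so
 * the common prefix is one memcpy and the tail is copied field by field.
 * A hypothetical compile-time guard for that layout assumption:
 *
 *	BUILD_BUG_ON(offsetof(FILE_ALL_INFO, AccessFlags) !=
 *		     offsetof(struct smb2_file_all_info, AccessFlags));
 */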
#endif /* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 84ec71bdfacd..83e83d8beabb 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -182,10 +182,9 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
extern void cifs_down_write(struct rw_semaphore *sem);
-extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
- struct file *file,
- struct tcon_link *tlink,
- __u32 oplock);
+struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ struct tcon_link *tlink, __u32 oplock,
+ const char *symlink_target);
extern int cifs_posix_open(const char *full_path, struct inode **inode,
struct super_block *sb, int mode,
unsigned int f_flags, __u32 *oplock, __u16 *netfid,
@@ -200,9 +199,9 @@ extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
extern struct inode *cifs_iget(struct super_block *sb,
struct cifs_fattr *fattr);
-extern int cifs_get_inode_info(struct inode **inode, const char *full_path,
- FILE_ALL_INFO *data, struct super_block *sb,
- int xid, const struct cifs_fid *fid);
+int cifs_get_inode_info(struct inode **inode, const char *full_path,
+ struct cifs_open_info_data *data, struct super_block *sb, int xid,
+ const struct cifs_fid *fid);
extern int smb311_posix_get_inode_info(struct inode **pinode, const char *search_path,
struct super_block *sb, unsigned int xid);
extern int cifs_get_inode_info_unix(struct inode **pinode,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 7a808e41b1b8..1724066c1536 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2305,7 +2305,7 @@ int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon,
remap);
}
rename_info->target_name_len = cpu_to_le32(2 * len_of_str);
- count = 12 /* sizeof(struct set_file_rename) */ + (2 * len_of_str);
+ count = sizeof(struct set_file_rename) + (2 * len_of_str);
byte_count += count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->TotalDataCount = pSMB->DataCount;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 40900aace416..ffb291579bb9 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2832,9 +2832,12 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
* sessinit is sent but no second negprot
*/
struct rfc1002_session_packet *ses_init_buf;
+ unsigned int req_noscope_len;
struct smb_hdr *smb_buf;
+
ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
GFP_KERNEL);
+
if (ses_init_buf) {
ses_init_buf->trailer.session_req.called_len = 32;
@@ -2870,8 +2873,12 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
ses_init_buf->trailer.session_req.scope2 = 0;
smb_buf = (struct smb_hdr *)ses_init_buf;
- /* sizeof RFC1002_SESSION_REQUEST with no scope */
- smb_buf->smb_buf_length = cpu_to_be32(0x81000044);
+ /* sizeof RFC1002_SESSION_REQUEST with no scopes */
+ req_noscope_len = sizeof(struct rfc1002_session_packet) - 2;
+
+ /* == cpu_to_be32(0x81000044) */
+ smb_buf->smb_buf_length =
+ cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | req_noscope_len);
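	/*
	 * Editor's worked check (not part of the patch): the RFC 1002
	 * session-request type byte is 0x81, so as long as
	 * req_noscope_len evaluates to 0x44 (68 bytes: two 34-byte
	 * encoded NetBIOS names, called and calling), the expression
	 * above is (0x81 << 24) | 0x44 == 0x81000044, exactly the old
	 * magic constant it replaces.
	 */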
rc = smb_send(server, smb_buf, 0x44);
kfree(ses_init_buf);
/*
@@ -3922,12 +3929,11 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
- if (tcon->pipe || (ses->server->sec_mode & SECMODE_USER)) {
- pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
- *bcc_ptr = 0; /* password is null byte */
- bcc_ptr++; /* skip password */
- /* already aligned so no need to do it below */
- }
+
+ pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
+ *bcc_ptr = 0; /* password is null byte */
+ bcc_ptr++; /* skip password */
+ /* already aligned so no need to do it below */
if (ses->server->sign)
smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f58869306309..a5c73c2af3a2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -165,10 +165,9 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
/* Inode operations in similar order to how they appear in Linux file fs.h */
-static int
-cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
- struct tcon_link *tlink, unsigned oflags, umode_t mode,
- __u32 *oplock, struct cifs_fid *fid)
+static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+ struct tcon_link *tlink, unsigned int oflags, umode_t mode, __u32 *oplock,
+ struct cifs_fid *fid, struct cifs_open_info_data *buf)
{
int rc = -ENOENT;
int create_options = CREATE_NOT_DIR;
@@ -177,7 +176,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
struct cifs_tcon *tcon = tlink_tcon(tlink);
const char *full_path;
void *page = alloc_dentry_path();
- FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
int disposition;
struct TCP_Server_Info *server = tcon->ses->server;
@@ -290,12 +288,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
goto out;
}
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (buf == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
/*
* if we're not using unix extensions, see if we need to set
* ATTR_READONLY on the create call
@@ -364,8 +356,7 @@ cifs_create_get_file_info:
{
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* TODO: Add support for calling POSIX query info here, but passing in fid */
- rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
- xid, fid);
+ rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, fid);
if (newinode) {
if (server->ops->set_lease_key)
server->ops->set_lease_key(newinode, fid);
@@ -402,7 +393,6 @@ cifs_create_set_dentry:
d_add(direntry, newinode);
out:
- kfree(buf);
free_dentry_path(page);
return rc;
@@ -423,10 +413,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
struct tcon_link *tlink;
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
- struct cifs_fid fid;
+ struct cifs_fid fid = {};
struct cifs_pending_open open;
__u32 oplock;
struct cifsFileInfo *file_info;
+ struct cifs_open_info_data buf = {};
if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
return -EIO;
@@ -484,8 +475,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
cifs_add_pending_open(&fid, tlink, &open);
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
- &oplock, &fid);
-
+ &oplock, &fid, &buf);
if (rc) {
cifs_del_pending_open(&open);
goto out;
@@ -510,7 +500,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
file->f_op = &cifs_file_direct_ops;
}
- file_info = cifs_new_fileinfo(&fid, file, tlink, oplock);
+ file_info = cifs_new_fileinfo(&fid, file, tlink, oplock, buf.symlink_target);
if (file_info == NULL) {
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
@@ -526,6 +516,7 @@ out:
cifs_put_tlink(tlink);
out_free_xid:
free_xid(xid);
+ cifs_free_open_info(&buf);
return rc;
}
@@ -547,6 +538,7 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
struct TCP_Server_Info *server;
struct cifs_fid fid;
__u32 oplock;
+ struct cifs_open_info_data buf = {};
cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
inode, direntry, direntry);
@@ -565,11 +557,11 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
if (server->ops->new_lease_key)
server->ops->new_lease_key(&fid);
- rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
- &oplock, &fid);
+ rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode, &oplock, &fid, &buf);
if (!rc && server->ops->close)
server->ops->close(xid, tcon, &fid);
+ cifs_free_open_info(&buf);
cifs_put_tlink(tlink);
out_free_xid:
free_xid(xid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7d756721e1a6..f6ffee514c34 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -209,16 +209,14 @@ posix_open_ret:
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
-static int
-cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
- struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
- struct cifs_fid *fid, unsigned int xid)
+static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
+ struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
int rc;
int desired_access;
int disposition;
int create_options = CREATE_NOT_DIR;
- FILE_ALL_INFO *buf;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
@@ -255,10 +253,6 @@ cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *ci
/* BB pass O_SYNC flag through on file attributes .. BB */
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (f_flags & O_SYNC)
create_options |= CREATE_WRITE_THROUGH;
@@ -276,9 +270,8 @@ cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *ci
oparms.reconnect = false;
rc = server->ops->open(xid, &oparms, oplock, buf);
-
if (rc)
- goto out;
+ return rc;
/* TODO: Add support for calling posix query info but with passing in fid */
if (tcon->unix_ext)
@@ -294,8 +287,6 @@ cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *ci
rc = -EOPENSTALE;
}
-out:
- kfree(buf);
return rc;
}
@@ -325,9 +316,9 @@ cifs_down_write(struct rw_semaphore *sem)
static void cifsFileInfo_put_work(struct work_struct *work);
-struct cifsFileInfo *
-cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
- struct tcon_link *tlink, __u32 oplock)
+struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ struct tcon_link *tlink, __u32 oplock,
+ const char *symlink_target)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
@@ -347,6 +338,15 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
return NULL;
}
+ if (symlink_target) {
+ cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
+ if (!cfile->symlink_target) {
+ kfree(fdlocks);
+ kfree(cfile);
+ return NULL;
+ }
+ }
+
INIT_LIST_HEAD(&fdlocks->locks);
fdlocks->cfile = cfile;
cfile->llist = fdlocks;
@@ -440,6 +440,7 @@ static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
cifs_put_tlink(cifs_file->tlink);
dput(cifs_file->dentry);
cifs_sb_deactive(sb);
+ kfree(cifs_file->symlink_target);
kfree(cifs_file);
}
@@ -488,7 +489,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct super_block *sb = inode->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifs_fid fid;
+ struct cifs_fid fid = {};
struct cifs_pending_open open;
bool oplock_break_cancelled;
@@ -570,8 +571,9 @@ int cifs_open(struct inode *inode, struct file *file)
void *page;
const char *full_path;
bool posix_open_ok = false;
- struct cifs_fid fid;
+ struct cifs_fid fid = {};
struct cifs_pending_open open;
+ struct cifs_open_info_data data = {};
xid = get_xid();
@@ -662,15 +664,15 @@ int cifs_open(struct inode *inode, struct file *file)
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
- rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
- file->f_flags, &oplock, &fid, xid);
+ rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
+ xid, &data);
if (rc) {
cifs_del_pending_open(&open);
goto out;
}
}
- cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
+ cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
if (cfile == NULL) {
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
@@ -712,6 +714,7 @@ out:
free_dentry_path(page);
free_xid(xid);
cifs_put_tlink(tlink);
+ cifs_free_open_info(&data);
return rc;
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ad10c61ab5c9..7cf96e581d24 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -210,6 +210,17 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
*/
inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
}
+
+ if (S_ISLNK(fattr->cf_mode)) {
+ kfree(cifs_i->symlink_target);
+ cifs_i->symlink_target = fattr->cf_symlink_target;
+ fattr->cf_symlink_target = NULL;
+
+ if (unlikely(!cifs_i->symlink_target))
+ inode->i_link = ERR_PTR(-EOPNOTSUPP);
+ else
+ inode->i_link = cifs_i->symlink_target;
+ }
spin_unlock(&inode->i_lock);
if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
@@ -347,13 +358,20 @@ cifs_get_file_info_unix(struct file *filp)
int rc;
unsigned int xid;
FILE_UNIX_BASIC_INFO find_data;
- struct cifs_fattr fattr;
+ struct cifs_fattr fattr = {};
struct inode *inode = file_inode(filp);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
xid = get_xid();
+
+ if (cfile->symlink_target) {
+ fattr.cf_symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ if (!fattr.cf_symlink_target)
+ return -ENOMEM;
+ }
+
rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->fid.netfid, &find_data);
if (!rc) {
cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
@@ -378,6 +396,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
FILE_UNIX_BASIC_INFO find_data;
struct cifs_fattr fattr;
struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
@@ -387,10 +406,12 @@ int cifs_get_inode_info_unix(struct inode **pinode,
if (IS_ERR(tlink))
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
+ server = tcon->ses->server;
/* could have done a find first instead but this returns more info */
rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
cifs_sb->local_nls, cifs_remap(cifs_sb));
+ cifs_dbg(FYI, "%s: query path info: rc = %d\n", __func__, rc);
cifs_put_tlink(tlink);
if (!rc) {
@@ -410,6 +431,17 @@ int cifs_get_inode_info_unix(struct inode **pinode,
cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
}
+ if (S_ISLNK(fattr.cf_mode) && !fattr.cf_symlink_target) {
+ if (!server->ops->query_symlink)
+ return -EOPNOTSUPP;
+ rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
+ &fattr.cf_symlink_target, false);
+ if (rc) {
+ cifs_dbg(FYI, "%s: query_symlink: %d\n", __func__, rc);
+ goto cgiiu_exit;
+ }
+ }
+
if (*pinode == NULL) {
/* get new inode */
cifs_fill_uniqueid(sb, &fattr);
@@ -432,6 +464,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
}
cgiiu_exit:
+ kfree(fattr.cf_symlink_target);
return rc;
}
#else
@@ -601,10 +634,10 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
}
/* Fill a cifs_fattr struct with info from POSIX info struct */
-static void
-smb311_posix_info_to_fattr(struct cifs_fattr *fattr, struct smb311_posix_qinfo *info,
- struct super_block *sb, bool adjust_tz, bool symlink)
+static void smb311_posix_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data,
+ struct super_block *sb, bool adjust_tz, bool symlink)
{
+ struct smb311_posix_qinfo *info = &data->posix_fi;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
@@ -639,6 +672,8 @@ smb311_posix_info_to_fattr(struct cifs_fattr *fattr, struct smb311_posix_qinfo *
if (symlink) {
fattr->cf_mode |= S_IFLNK;
fattr->cf_dtype = DT_LNK;
+ fattr->cf_symlink_target = data->symlink_target;
+ data->symlink_target = NULL;
} else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
fattr->cf_mode |= S_IFDIR;
fattr->cf_dtype = DT_DIR;
@@ -655,13 +690,11 @@ smb311_posix_info_to_fattr(struct cifs_fattr *fattr, struct smb311_posix_qinfo *
fattr->cf_mode, fattr->cf_uniqueid, fattr->cf_nlink);
}
-
-/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
-static void
-cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
- struct super_block *sb, bool adjust_tz,
- bool symlink, u32 reparse_tag)
+static void cifs_open_info_to_fattr(struct cifs_fattr *fattr, struct cifs_open_info_data *data,
+ struct super_block *sb, bool adjust_tz, bool symlink,
+ u32 reparse_tag)
{
+ struct smb2_file_all_info *info = &data->fi;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
@@ -703,7 +736,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
} else if (reparse_tag == IO_REPARSE_TAG_LX_BLK) {
fattr->cf_mode |= S_IFBLK | cifs_sb->ctx->file_mode;
fattr->cf_dtype = DT_BLK;
- } else if (symlink) { /* TODO add more reparse tag checks */
+ } else if (symlink || reparse_tag == IO_REPARSE_TAG_SYMLINK ||
+ reparse_tag == IO_REPARSE_TAG_NFS) {
fattr->cf_mode = S_IFLNK;
fattr->cf_dtype = DT_LNK;
} else if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
@@ -735,6 +769,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
}
}
+ if (S_ISLNK(fattr->cf_mode)) {
+ fattr->cf_symlink_target = data->symlink_target;
+ data->symlink_target = NULL;
+ }
+
fattr->cf_uid = cifs_sb->ctx->linux_uid;
fattr->cf_gid = cifs_sb->ctx->linux_gid;
}
@@ -744,23 +783,28 @@ cifs_get_file_info(struct file *filp)
{
int rc;
unsigned int xid;
- FILE_ALL_INFO find_data;
+ struct cifs_open_info_data data = {};
struct cifs_fattr fattr;
struct inode *inode = file_inode(filp);
struct cifsFileInfo *cfile = filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
+ bool symlink = false;
+ u32 tag = 0;
if (!server->ops->query_file_info)
return -ENOSYS;
xid = get_xid();
- rc = server->ops->query_file_info(xid, tcon, &cfile->fid, &find_data);
+ rc = server->ops->query_file_info(xid, tcon, cfile, &data);
switch (rc) {
case 0:
/* TODO: add support to query reparse tag */
- cifs_all_info_to_fattr(&fattr, &find_data, inode->i_sb, false,
- false, 0 /* no reparse tag */);
+ if (data.symlink_target) {
+ symlink = true;
+ tag = IO_REPARSE_TAG_SYMLINK;
+ }
+ cifs_open_info_to_fattr(&fattr, &data, inode->i_sb, false, symlink, tag);
break;
case -EREMOTE:
cifs_create_dfs_fattr(&fattr, inode->i_sb);
@@ -789,6 +833,7 @@ cifs_get_file_info(struct file *filp)
/* if filetype is different, return error */
rc = cifs_fattr_to_inode(inode, &fattr);
cgfi_exit:
+ cifs_free_open_info(&data);
free_xid(xid);
return rc;
}
@@ -860,14 +905,9 @@ cifs_backup_query_path_info(int xid,
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
-static void
-cifs_set_fattr_ino(int xid,
- struct cifs_tcon *tcon,
- struct super_block *sb,
- struct inode **inode,
- const char *full_path,
- FILE_ALL_INFO *data,
- struct cifs_fattr *fattr)
+static void cifs_set_fattr_ino(int xid, struct cifs_tcon *tcon, struct super_block *sb,
+ struct inode **inode, const char *full_path,
+ struct cifs_open_info_data *data, struct cifs_fattr *fattr)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -885,11 +925,8 @@ cifs_set_fattr_ino(int xid,
* If we have an inode pass a NULL tcon to ensure we don't
* make a round trip to the server. This only works for SMB2+.
*/
- rc = server->ops->get_srv_inum(xid,
- *inode ? NULL : tcon,
- cifs_sb, full_path,
- &fattr->cf_uniqueid,
- data);
+ rc = server->ops->get_srv_inum(xid, *inode ? NULL : tcon, cifs_sb, full_path,
+ &fattr->cf_uniqueid, data);
if (rc) {
/*
* If that fails reuse existing ino or generate one
@@ -923,14 +960,10 @@ static inline bool is_inode_cache_good(struct inode *ino)
return ino && CIFS_CACHE_READ(CIFS_I(ino)) && CIFS_I(ino)->time != 0;
}
-int
-cifs_get_inode_info(struct inode **inode,
- const char *full_path,
- FILE_ALL_INFO *in_data,
- struct super_block *sb, int xid,
- const struct cifs_fid *fid)
+int cifs_get_inode_info(struct inode **inode, const char *full_path,
+ struct cifs_open_info_data *data, struct super_block *sb, int xid,
+ const struct cifs_fid *fid)
{
-
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct tcon_link *tlink;
@@ -938,8 +971,7 @@ cifs_get_inode_info(struct inode **inode,
bool adjust_tz = false;
struct cifs_fattr fattr = {0};
bool is_reparse_point = false;
- FILE_ALL_INFO *data = in_data;
- FILE_ALL_INFO *tmp_data = NULL;
+ struct cifs_open_info_data tmp_data = {};
void *smb1_backup_rsp_buf = NULL;
int rc = 0;
int tmprc = 0;
@@ -960,21 +992,15 @@ cifs_get_inode_info(struct inode **inode,
cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
goto out;
}
- tmp_data = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (!tmp_data) {
- rc = -ENOMEM;
- goto out;
- }
- rc = server->ops->query_path_info(xid, tcon, cifs_sb,
- full_path, tmp_data,
- &adjust_tz, &is_reparse_point);
+ rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path, &tmp_data,
+ &adjust_tz, &is_reparse_point);
#ifdef CONFIG_CIFS_DFS_UPCALL
if (rc == -ENOENT && is_tcon_dfs(tcon))
rc = cifs_dfs_query_info_nonascii_quirk(xid, tcon,
cifs_sb,
full_path);
#endif
- data = tmp_data;
+ data = &tmp_data;
}
/*
@@ -988,14 +1014,24 @@ cifs_get_inode_info(struct inode **inode,
* since we have to check if its reparse tag matches a known
* special file type e.g. symlink or fifo or char etc.
*/
- if ((le32_to_cpu(data->Attributes) & ATTR_REPARSE) &&
- server->ops->query_reparse_tag) {
- rc = server->ops->query_reparse_tag(xid, tcon, cifs_sb,
- full_path, &reparse_tag);
- cifs_dbg(FYI, "reparse tag 0x%x\n", reparse_tag);
+ if (is_reparse_point && data->symlink_target) {
+ reparse_tag = IO_REPARSE_TAG_SYMLINK;
+ } else if ((le32_to_cpu(data->fi.Attributes) & ATTR_REPARSE) &&
+ server->ops->query_reparse_tag) {
+ tmprc = server->ops->query_reparse_tag(xid, tcon, cifs_sb, full_path,
+ &reparse_tag);
+ if (tmprc)
+ cifs_dbg(FYI, "%s: query_reparse_tag: rc = %d\n", __func__, tmprc);
+ if (server->ops->query_symlink) {
+ tmprc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
+ &data->symlink_target,
+ is_reparse_point);
+ if (tmprc)
+ cifs_dbg(FYI, "%s: query_symlink: rc = %d\n", __func__,
+ tmprc);
+ }
}
- cifs_all_info_to_fattr(&fattr, data, sb, adjust_tz,
- is_reparse_point, reparse_tag);
+ cifs_open_info_to_fattr(&fattr, data, sb, adjust_tz, is_reparse_point, reparse_tag);
break;
case -EREMOTE:
/* DFS link, no metadata available on this server */
@@ -1014,18 +1050,20 @@ cifs_get_inode_info(struct inode **inode,
*/
if (backup_cred(cifs_sb) && is_smb1_server(server)) {
/* for easier reading */
+ FILE_ALL_INFO *fi;
FILE_DIRECTORY_INFO *fdi;
SEARCH_ID_FULL_DIR_INFO *si;
rc = cifs_backup_query_path_info(xid, tcon, sb,
full_path,
&smb1_backup_rsp_buf,
- &data);
+ &fi);
if (rc)
goto out;
- fdi = (FILE_DIRECTORY_INFO *)data;
- si = (SEARCH_ID_FULL_DIR_INFO *)data;
+ move_cifs_info_to_smb2(&data->fi, fi);
+ fdi = (FILE_DIRECTORY_INFO *)fi;
+ si = (SEARCH_ID_FULL_DIR_INFO *)fi;
cifs_dir_info_to_fattr(&fattr, fdi, cifs_sb);
fattr.cf_uniqueid = le64_to_cpu(si->UniqueId);
@@ -1123,7 +1161,8 @@ handle_mnt_opt:
out:
cifs_buf_release(smb1_backup_rsp_buf);
cifs_put_tlink(tlink);
- kfree(tmp_data);
+ cifs_free_open_info(&tmp_data);
+ kfree(fattr.cf_symlink_target);
return rc;
}
@@ -1138,7 +1177,7 @@ smb311_posix_get_inode_info(struct inode **inode,
bool adjust_tz = false;
struct cifs_fattr fattr = {0};
bool symlink = false;
- struct smb311_posix_qinfo *data = NULL;
+ struct cifs_open_info_data data = {};
int rc = 0;
int tmprc = 0;
@@ -1155,15 +1194,9 @@ smb311_posix_get_inode_info(struct inode **inode,
cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
goto out;
}
- data = kmalloc(sizeof(struct smb311_posix_qinfo), GFP_KERNEL);
- if (!data) {
- rc = -ENOMEM;
- goto out;
- }
- rc = smb311_posix_query_path_info(xid, tcon, cifs_sb,
- full_path, data,
- &adjust_tz, &symlink);
+ rc = smb311_posix_query_path_info(xid, tcon, cifs_sb, full_path, &data, &adjust_tz,
+ &symlink);
/*
* 2. Convert it to internal cifs metadata (fattr)
@@ -1171,7 +1204,7 @@ smb311_posix_get_inode_info(struct inode **inode,
switch (rc) {
case 0:
- smb311_posix_info_to_fattr(&fattr, data, sb, adjust_tz, symlink);
+ smb311_posix_info_to_fattr(&fattr, &data, sb, adjust_tz, symlink);
break;
case -EREMOTE:
/* DFS link, no metadata available on this server */
@@ -1228,7 +1261,8 @@ smb311_posix_get_inode_info(struct inode **inode,
}
out:
cifs_put_tlink(tlink);
- kfree(data);
+ cifs_free_open_info(&data);
+ kfree(fattr.cf_symlink_target);
return rc;
}
@@ -2265,13 +2299,13 @@ cifs_dentry_needs_reval(struct dentry *dentry)
return true;
if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
- mutex_lock(&cfid->fid_mutex);
+ spin_lock(&cfid->fid_lock);
if (cfid->time && cifs_i->time > cfid->time) {
- mutex_unlock(&cfid->fid_mutex);
+ spin_unlock(&cfid->fid_lock);
close_cached_dir(cfid);
return false;
}
- mutex_unlock(&cfid->fid_mutex);
+ spin_unlock(&cfid->fid_lock);
close_cached_dir(cfid);
}
/*
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index b6e6e5d6c8dd..89d5fa887364 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -484,12 +484,35 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
tcon = tlink_tcon(tlink);
if (tcon && tcon->ses->server->ops->notify) {
rc = tcon->ses->server->ops->notify(xid,
- filep, (void __user *)arg);
+ filep, (void __user *)arg,
+ false /* no ret data */);
cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
} else
rc = -EOPNOTSUPP;
cifs_put_tlink(tlink);
break;
+ case CIFS_IOC_NOTIFY_INFO:
+ if (!S_ISDIR(inode->i_mode)) {
+ /* Notify can only be done on directories */
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ cifs_sb = CIFS_SB(inode->i_sb);
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ break;
+ }
+ tcon = tlink_tcon(tlink);
+ if (tcon && tcon->ses->server->ops->notify) {
+ rc = tcon->ses->server->ops->notify(xid,
+ filep, (void __user *)arg,
+ true /* return details */);
+ cifs_dbg(FYI, "ioctl notify info rc %d\n", rc);
+ } else
+ rc = -EOPNOTSUPP;
+ cifs_put_tlink(tlink);
+ break;
case CIFS_IOC_SHUTDOWN:
rc = cifs_shutdown(inode->i_sb, arg);
break;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cd29c296cec6..bd374feeccaa 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -201,40 +201,6 @@ out:
return rc;
}
-static int
-query_mf_symlink(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const unsigned char *path,
- char **symlinkinfo)
-{
- int rc;
- u8 *buf = NULL;
- unsigned int link_len = 0;
- unsigned int bytes_read = 0;
-
- buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (tcon->ses->server->ops->query_mf_symlink)
- rc = tcon->ses->server->ops->query_mf_symlink(xid, tcon,
- cifs_sb, path, buf, &bytes_read);
- else
- rc = -ENOSYS;
-
- if (rc)
- goto out;
-
- if (bytes_read == 0) { /* not a symlink */
- rc = -EINVAL;
- goto out;
- }
-
- rc = parse_mf_symlink(buf, bytes_read, &link_len, symlinkinfo);
-out:
- kfree(buf);
- return rc;
-}
-
int
check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
@@ -244,6 +210,7 @@ check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
u8 *buf = NULL;
unsigned int link_len = 0;
unsigned int bytes_read = 0;
+ char *symlink = NULL;
if (!couldbe_mf_symlink(fattr))
/* it's not a symlink */
@@ -265,7 +232,7 @@ check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
if (bytes_read == 0) /* not a symlink */
goto out;
- rc = parse_mf_symlink(buf, bytes_read, &link_len, NULL);
+ rc = parse_mf_symlink(buf, bytes_read, &link_len, &symlink);
if (rc == -EINVAL) {
/* it's not a symlink */
rc = 0;
@@ -280,6 +247,7 @@ check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
fattr->cf_mode &= ~S_IFMT;
fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
fattr->cf_dtype = DT_LNK;
+ fattr->cf_symlink_target = symlink;
out:
kfree(buf);
return rc;
@@ -599,75 +567,6 @@ cifs_hl_exit:
return rc;
}
-const char *
-cifs_get_link(struct dentry *direntry, struct inode *inode,
- struct delayed_call *done)
-{
- int rc = -ENOMEM;
- unsigned int xid;
- const char *full_path;
- void *page;
- char *target_path = NULL;
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct tcon_link *tlink = NULL;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
-
- if (!direntry)
- return ERR_PTR(-ECHILD);
-
- xid = get_xid();
-
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink)) {
- free_xid(xid);
- return ERR_CAST(tlink);
- }
- tcon = tlink_tcon(tlink);
- server = tcon->ses->server;
-
- page = alloc_dentry_path();
- full_path = build_path_from_dentry(direntry, page);
- if (IS_ERR(full_path)) {
- free_xid(xid);
- cifs_put_tlink(tlink);
- free_dentry_path(page);
- return ERR_CAST(full_path);
- }
-
- cifs_dbg(FYI, "Full path: %s inode = 0x%p\n", full_path, inode);
-
- rc = -EACCES;
- /*
- * First try Minshall+French Symlinks, if configured
- * and fallback to UNIX Extensions Symlinks.
- */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
- rc = query_mf_symlink(xid, tcon, cifs_sb, full_path,
- &target_path);
-
- if (rc != 0 && server->ops->query_symlink) {
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- bool reparse_point = false;
-
- if (cifsi->cifsAttrs & ATTR_REPARSE)
- reparse_point = true;
-
- rc = server->ops->query_symlink(xid, tcon, cifs_sb, full_path,
- &target_path, reparse_point);
- }
-
- free_dentry_path(page);
- free_xid(xid);
- cifs_put_tlink(tlink);
- if (rc != 0) {
- kfree(target_path);
- return ERR_PTR(rc);
- }
- set_delayed_call(done, kfree_link, target_path);
- return target_path;
-}
-
int
cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode,
struct dentry *direntry, const char *symname)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8e060c00c969..2d75ba5aaa8a 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -844,17 +844,34 @@ static bool emit_cached_dirents(struct cached_dirents *cde,
struct dir_context *ctx)
{
struct cached_dirent *dirent;
- int rc;
+ bool rc;
list_for_each_entry(dirent, &cde->entries, entry) {
- if (ctx->pos >= dirent->pos)
+ /*
+ * Skip all entries prior to the current lseek()
+ * position.
+ */
+ if (ctx->pos > dirent->pos)
continue;
+ /*
+ * We recorded the current ->pos value for the dirent
+ * when we stored it in the cache.
+ * However, this sequence of ->pos values may have holes
+ * in it, for example dot-dirs returned from the server
+ * are suppressed.
+ * Handle this by forcing ctx->pos to be the same as the
+ * ->pos of the current dirent we emit from the cache.
+ * This means that when we emit these entries from the cache
+ * we now emit them with the same ->pos value as in the
+ * initial scan.
+ */
ctx->pos = dirent->pos;
rc = dir_emit(ctx, dirent->name, dirent->namelen,
dirent->fattr.cf_uniqueid,
dirent->fattr.cf_dtype);
if (!rc)
return rc;
+ ctx->pos++;
}
return true;
}
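/*
 * Editor's worked example (not part of the patch) of the invariant the
 * loop above maintains: cached entries keep the ->pos they were first
 * emitted with, so replaying from the cache reproduces the original
 * (pos, name) sequence, holes included. Suppose the initial scan
 * emitted:
 *
 *	pos 2 "a",  pos 3 "b",  pos 5 "c"	(pos 4 was a suppressed dot-dir)
 *
 * After llseek() to 3: "a" is skipped (3 > 2), "b" is emitted with
 * ctx->pos forced to 3, ctx->pos++ makes it 4, and "c" is then emitted
 * with ctx->pos forced to 5 -- identical to the first pass.
 */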
@@ -994,6 +1011,8 @@ static int cifs_filldir(char *find_entry, struct file *file,
cifs_unix_basic_to_fattr(&fattr,
&((FILE_UNIX_INFO *)find_entry)->basic,
cifs_sb);
+ if (S_ISLNK(fattr.cf_mode))
+ fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
break;
case SMB_FIND_FILE_INFO_STANDARD:
cifs_std_info_to_fattr(&fattr,
@@ -1202,10 +1221,10 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
ctx->pos, tmp_buf);
cifs_save_resume_key(current_entry, cifsFile);
break;
- } else
- current_entry =
- nxt_dir_entry(current_entry, end_of_smb,
- cifsFile->srch_inf.info_level);
+ }
+ current_entry =
+ nxt_dir_entry(current_entry, end_of_smb,
+ cifsFile->srch_inf.info_level);
}
kfree(tmp_buf);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index f1c3c6d9146c..0435d1dfa9e1 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -601,11 +601,6 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
/* BB FIXME add check that strings total less
than 335 or will need to send them as arrays */
- /* unicode strings, must be word aligned before the call */
-/* if ((long) bcc_ptr % 2) {
- *bcc_ptr = 0;
- bcc_ptr++;
- } */
/* copy user */
if (ses->user_name == NULL) {
/* null user mount */
@@ -1213,16 +1208,18 @@ out_free_smb_buf:
static void
sess_free_buffer(struct sess_data *sess_data)
{
- int i;
+ struct kvec *iov = sess_data->iov;
- /* zero the session data before freeing, as it might contain sensitive info (keys, etc) */
- for (i = 0; i < 3; i++)
- if (sess_data->iov[i].iov_base)
- memzero_explicit(sess_data->iov[i].iov_base, sess_data->iov[i].iov_len);
+ /*
+ * Zero the session data before freeing, as it might contain sensitive info (keys, etc).
+ * Note that iov[1] is already freed by caller.
+ */
+ if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
+ memzero_explicit(iov[0].iov_base, iov[0].iov_len);
- free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
+ free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
sess_data->buf0_type = CIFS_NO_BUFFER;
- kfree(sess_data->iov[2].iov_base);
+ kfree_sensitive(iov[2].iov_base);
}
static int
@@ -1324,7 +1321,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
}
if (ses->capabilities & CAP_UNICODE) {
- if (sess_data->iov[0].iov_len % 2) {
+ if (!IS_ALIGNED(sess_data->iov[0].iov_len, 2)) {
*bcc_ptr = 0;
bcc_ptr++;
}
@@ -1364,7 +1361,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
/* no string area to decode, do nothing */
} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
/* unicode string area must be word-aligned */
- if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+ if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
++bcc_ptr;
--bytes_remaining;
}
@@ -1448,8 +1445,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
if (ses->capabilities & CAP_UNICODE) {
/* unicode strings must be word aligned */
- if ((sess_data->iov[0].iov_len
- + sess_data->iov[1].iov_len) % 2) {
+ if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
*bcc_ptr = 0;
bcc_ptr++;
}
@@ -1500,7 +1496,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
/* no string area to decode, do nothing */
} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
/* unicode string area must be word-aligned */
- if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+ if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
++bcc_ptr;
--bytes_remaining;
}
@@ -1552,7 +1548,7 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
bcc_ptr = sess_data->iov[2].iov_base;
/* unicode strings must be word aligned */
- if ((sess_data->iov[0].iov_len + sess_data->iov[1].iov_len) % 2) {
+ if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
*bcc_ptr = 0;
bcc_ptr++;
}
@@ -1753,7 +1749,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
/* no string area to decode, do nothing */
} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
/* unicode string area must be word-aligned */
- if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
+ if (!IS_ALIGNED((unsigned long)bcc_ptr - (unsigned long)smb_buf, 2)) {
++bcc_ptr;
--bytes_remaining;
}
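/*
 * Editor's note (illustration, not part of the patch): the IS_ALIGNED()
 * conversions in this file are behavior-preserving. IS_ALIGNED(x, a) is
 * defined as ((x & (a - 1)) == 0), so for a == 2:
 *
 *	!IS_ALIGNED(len, 2)  ==  ((len & 1) != 0)  ==  (len % 2 != 0)
 *
 * the same odd-length test as before, just phrased as an alignment check.
 */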
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index f36b2d2d40ca..50480751e521 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -542,31 +542,32 @@ cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
-static int
-cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- FILE_ALL_INFO *data, bool *adjustTZ, bool *symlink)
+static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ struct cifs_open_info_data *data, bool *adjustTZ, bool *symlink)
{
int rc;
+ FILE_ALL_INFO fi = {};
*symlink = false;
/* could do find first instead but this returns more info */
- rc = CIFSSMBQPathInfo(xid, tcon, full_path, data, 0 /* not legacy */,
- cifs_sb->local_nls, cifs_remap(cifs_sb));
+ rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
/*
* BB optimize code so we do not make the above call when server claims
* no NT SMB support and the above call failed at least once - set flag
* in tcon or mount.
*/
if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
- rc = SMBQueryInformation(xid, tcon, full_path, data,
- cifs_sb->local_nls,
+ rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
cifs_remap(cifs_sb));
+ if (!rc)
+ move_cifs_info_to_smb2(&data->fi, &fi);
*adjustTZ = true;
}
- if (!rc && (le32_to_cpu(data->Attributes) & ATTR_REPARSE)) {
+ if (!rc && (le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) {
int tmprc;
int oplock = 0;
struct cifs_fid fid;
@@ -592,10 +593,9 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
-static int
-cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- u64 *uniqueid, FILE_ALL_INFO *data)
+static int cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ u64 *uniqueid, struct cifs_open_info_data *unused)
{
/*
* We can not use the IndexNumber field by default from Windows or
@@ -613,11 +613,22 @@ cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
cifs_remap(cifs_sb));
}
-static int
-cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_fid *fid, FILE_ALL_INFO *data)
+static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
{
- return CIFSSMBQFileInfo(xid, tcon, fid->netfid, data);
+ int rc;
+ FILE_ALL_INFO fi = {};
+
+ if (cfile->symlink_target) {
+ data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ if (!data->symlink_target)
+ return -ENOMEM;
+ }
+
+ rc = CIFSSMBQFileInfo(xid, tcon, cfile->fid.netfid, &fi);
+ if (!rc)
+ move_cifs_info_to_smb2(&data->fi, &fi);
+ return rc;
}
static void
@@ -702,19 +713,20 @@ cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
cifsInode->cifsAttrs = dosattrs;
}
-static int
-cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
- __u32 *oplock, FILE_ALL_INFO *buf)
+static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+ void *buf)
{
+ FILE_ALL_INFO *fi = buf;
+
if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS))
return SMBLegacyOpen(xid, oparms->tcon, oparms->path,
oparms->disposition,
oparms->desired_access,
oparms->create_options,
- &oparms->fid->netfid, oplock, buf,
+ &oparms->fid->netfid, oplock, fi,
oparms->cifs_sb->local_nls,
cifs_remap(oparms->cifs_sb));
- return CIFS_open(xid, oparms, oplock, buf);
+ return CIFS_open(xid, oparms, oplock, fi);
}
static void
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 9dfd2dd612c2..ffbd9a99fc12 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -20,40 +20,125 @@
#include "cifs_unicode.h"
#include "fscache.h"
#include "smb2proto.h"
+#include "smb2status.h"
-int
-smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
- __u32 *oplock, FILE_ALL_INFO *buf)
+static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov)
+{
+ struct smb2_err_rsp *err = iov->iov_base;
+ struct smb2_symlink_err_rsp *sym = ERR_PTR(-EINVAL);
+ u32 len;
+
+ if (err->ErrorContextCount) {
+ struct smb2_error_context_rsp *p, *end;
+
+ len = (u32)err->ErrorContextCount * (offsetof(struct smb2_error_context_rsp,
+ ErrorContextData) +
+ sizeof(struct smb2_symlink_err_rsp));
+ if (le32_to_cpu(err->ByteCount) < len || iov->iov_len < len + sizeof(*err))
+ return ERR_PTR(-EINVAL);
+
+ p = (struct smb2_error_context_rsp *)err->ErrorData;
+ end = (struct smb2_error_context_rsp *)((u8 *)err + iov->iov_len);
+ do {
+ if (le32_to_cpu(p->ErrorId) == SMB2_ERROR_ID_DEFAULT) {
+ sym = (struct smb2_symlink_err_rsp *)&p->ErrorContextData;
+ break;
+ }
+ cifs_dbg(FYI, "%s: skipping unhandled error context: 0x%x\n",
+ __func__, le32_to_cpu(p->ErrorId));
+
+ len = ALIGN(le32_to_cpu(p->ErrorDataLength), 8);
+ p = (struct smb2_error_context_rsp *)((u8 *)&p->ErrorContextData + len);
+ } while (p < end);
+ } else if (le32_to_cpu(err->ByteCount) >= sizeof(*sym) &&
+ iov->iov_len >= SMB2_SYMLINK_STRUCT_SIZE) {
+ sym = (struct smb2_symlink_err_rsp *)err->ErrorData;
+ }
+
+ if (!IS_ERR(sym) && (le32_to_cpu(sym->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
+ le32_to_cpu(sym->ReparseTag) != IO_REPARSE_TAG_SYMLINK))
+ sym = ERR_PTR(-EINVAL);
+
+ return sym;
+}
+
+int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path)
+{
+ struct smb2_symlink_err_rsp *sym;
+ unsigned int sub_offs, sub_len;
+ unsigned int print_offs, print_len;
+ char *s;
+
+ if (!cifs_sb || !iov || !iov->iov_base || !iov->iov_len || !path)
+ return -EINVAL;
+
+ sym = symlink_data(iov);
+ if (IS_ERR(sym))
+ return PTR_ERR(sym);
+
+ sub_len = le16_to_cpu(sym->SubstituteNameLength);
+ sub_offs = le16_to_cpu(sym->SubstituteNameOffset);
+ print_len = le16_to_cpu(sym->PrintNameLength);
+ print_offs = le16_to_cpu(sym->PrintNameOffset);
+
+ if (iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offs + sub_len ||
+ iov->iov_len < SMB2_SYMLINK_STRUCT_SIZE + print_offs + print_len)
+ return -EINVAL;
+
+ s = cifs_strndup_from_utf16((char *)sym->PathBuffer + sub_offs, sub_len, true,
+ cifs_sb->local_nls);
+ if (!s)
+ return -ENOMEM;
+ convert_delimiter(s, '/');
+ cifs_dbg(FYI, "%s: symlink target: %s\n", __func__, s);
+
+ *path = s;
+ return 0;
+}
+
+int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, void *buf)
{
int rc;
__le16 *smb2_path;
- struct smb2_file_all_info *smb2_data = NULL;
__u8 smb2_oplock;
+ struct cifs_open_info_data *data = buf;
+ struct smb2_file_all_info file_info = {};
+ struct smb2_file_all_info *smb2_data = data ? &file_info : NULL;
+ struct kvec err_iov = {};
+ int err_buftype = CIFS_NO_BUFFER;
struct cifs_fid *fid = oparms->fid;
struct network_resiliency_req nr_ioctl_req;
smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb);
- if (smb2_path == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
- GFP_KERNEL);
- if (smb2_data == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ if (smb2_path == NULL)
+ return -ENOMEM;
oparms->desired_access |= FILE_READ_ATTRIBUTES;
smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
- rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL,
- NULL, NULL);
+ rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
+ &err_buftype);
+ if (rc && data) {
+ struct smb2_hdr *hdr = err_iov.iov_base;
+
+ if (unlikely(!err_iov.iov_base || err_buftype == CIFS_NO_BUFFER))
+ rc = -ENOMEM;
+ else if (hdr->Status == STATUS_STOPPED_ON_SYMLINK) {
+ rc = smb2_parse_symlink_response(oparms->cifs_sb, &err_iov,
+ &data->symlink_target);
+ if (!rc) {
+ memset(smb2_data, 0, sizeof(*smb2_data));
+ oparms->create_options |= OPEN_REPARSE_POINT;
+ rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data,
+ NULL, NULL, NULL);
+ oparms->create_options &= ~OPEN_REPARSE_POINT;
+ }
+ }
+ }
+
if (rc)
goto out;
-
if (oparms->tcon->use_resilient) {
/* default timeout is 0, servers pick default (120 seconds) */
nr_ioctl_req.Timeout =
@@ -73,7 +158,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
rc = 0;
}
- if (buf) {
+ if (smb2_data) {
/* if open response does not have IndexNumber field - get it */
if (smb2_data->IndexNumber == 0) {
rc = SMB2_get_srv_num(xid, oparms->tcon,
@@ -89,12 +174,12 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
rc = 0;
}
}
- move_smb2_info_to_cifs(buf, smb2_data);
+ memcpy(&data->fi, smb2_data, sizeof(data->fi));
}
*oplock = smb2_oplock;
out:
- kfree(smb2_data);
+ free_rsp_buf(err_buftype, err_iov.iov_base);
kfree(smb2_path);
return rc;
}
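A note on the validation above: smb2_parse_symlink_response() treats the (offset, length) pairs in the error response as untrusted wire data and range-checks them against iov_len before copying anything out. A minimal sketch of that pattern follows; the helper name is illustrative only, not part of the patch:

    /*
     * Illustrative only: reject any server-supplied (offs, len) pair that
     * does not fit inside the received buffer.  SMB2_SYMLINK_STRUCT_SIZE
     * accounts for the fixed error-response header preceding PathBuffer.
     */
    static bool symlink_range_ok(size_t iov_len, unsigned int offs,
                                 unsigned int len)
    {
            return iov_len >= SMB2_SYMLINK_STRUCT_SIZE + offs + len;
    }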
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index bb3e3d5a0cda..a6640e6ea58b 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -24,6 +24,7 @@
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cached_dir.h"
+#include "smb2status.h"
static void
free_set_inf_compound(struct smb_rqst *rqst)
@@ -50,13 +51,15 @@ struct cop_vars {
/*
* note: If cfile is passed, the reference to it is dropped here.
* So make sure that you do not reuse cfile after return from this func.
+ *
+ * If @err_iov and @err_buftype are passed, make sure they are both large enough (>= 3) to hold
+ * all error responses. The caller is also responsible for freeing them.
*/
-static int
-smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- __u32 desired_access, __u32 create_disposition,
- __u32 create_options, umode_t mode, void *ptr, int command,
- struct cifsFileInfo *cfile)
+static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ __u32 desired_access, __u32 create_disposition, __u32 create_options,
+ umode_t mode, void *ptr, int command, struct cifsFileInfo *cfile,
+ struct kvec *err_iov, int *err_buftype)
{
struct cop_vars *vars = NULL;
struct kvec *rsp_iov;
@@ -70,6 +73,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
int num_rqst = 0;
int resp_buftype[3];
struct smb2_query_info_rsp *qi_rsp = NULL;
+ struct cifs_open_info_data *idata;
int flags = 0;
__u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
unsigned int size[2];
@@ -385,14 +389,19 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
switch (command) {
case SMB2_OP_QUERY_INFO:
+ idata = ptr;
+ if (rc == 0 && cfile && cfile->symlink_target) {
+ idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ if (!idata->symlink_target)
+ rc = -ENOMEM;
+ }
if (rc == 0) {
qi_rsp = (struct smb2_query_info_rsp *)
rsp_iov[1].iov_base;
rc = smb2_validate_and_copy_iov(
le16_to_cpu(qi_rsp->OutputBufferOffset),
le32_to_cpu(qi_rsp->OutputBufferLength),
- &rsp_iov[1], sizeof(struct smb2_file_all_info),
- ptr);
+ &rsp_iov[1], sizeof(idata->fi), (char *)&idata->fi);
}
if (rqst[1].rq_iov)
SMB2_query_info_free(&rqst[1]);
@@ -406,13 +415,20 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
tcon->tid);
break;
case SMB2_OP_POSIX_QUERY_INFO:
+ idata = ptr;
+ if (rc == 0 && cfile && cfile->symlink_target) {
+ idata->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ if (!idata->symlink_target)
+ rc = -ENOMEM;
+ }
if (rc == 0) {
qi_rsp = (struct smb2_query_info_rsp *)
rsp_iov[1].iov_base;
rc = smb2_validate_and_copy_iov(
le16_to_cpu(qi_rsp->OutputBufferOffset),
le32_to_cpu(qi_rsp->OutputBufferLength),
- &rsp_iov[1], sizeof(struct smb311_posix_qinfo) /* add SIDs */, ptr);
+ &rsp_iov[1], sizeof(idata->posix_fi) /* add SIDs */,
+ (char *)&idata->posix_fi);
}
if (rqst[1].rq_iov)
SMB2_query_info_free(&rqst[1]);
@@ -477,42 +493,33 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
free_set_inf_compound(rqst);
break;
}
- free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
- free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
- free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+
+ if (rc && err_iov && err_buftype) {
+ memcpy(err_iov, rsp_iov, 3 * sizeof(*err_iov));
+ memcpy(err_buftype, resp_buftype, 3 * sizeof(*err_buftype));
+ } else {
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ }
kfree(vars);
return rc;
}
-void
-move_smb2_info_to_cifs(FILE_ALL_INFO *dst, struct smb2_file_all_info *src)
-{
- memcpy(dst, src, (size_t)(&src->CurrentByteOffset) - (size_t)src);
- dst->CurrentByteOffset = src->CurrentByteOffset;
- dst->Mode = src->Mode;
- dst->AlignmentRequirement = src->AlignmentRequirement;
- dst->IndexNumber1 = 0; /* we don't use it */
-}
-
-int
-smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- FILE_ALL_INFO *data, bool *adjust_tz, bool *reparse)
+int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
{
int rc;
- struct smb2_file_all_info *smb2_data;
__u32 create_options = 0;
struct cifsFileInfo *cfile;
struct cached_fid *cfid = NULL;
+ struct kvec err_iov[3] = {};
+ int err_buftype[3] = {};
*adjust_tz = false;
*reparse = false;
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
- GFP_KERNEL);
- if (smb2_data == NULL)
- return -ENOMEM;
-
if (strcmp(full_path, ""))
rc = -ENOENT;
else
@@ -520,63 +527,58 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
/* If it is a root and its handle is cached then use it */
if (!rc) {
if (cfid->file_all_info_is_valid) {
- move_smb2_info_to_cifs(data,
- &cfid->file_all_info);
+ memcpy(&data->fi, &cfid->file_all_info, sizeof(data->fi));
} else {
- rc = SMB2_query_info(xid, tcon,
- cfid->fid.persistent_fid,
- cfid->fid.volatile_fid, smb2_data);
- if (!rc)
- move_smb2_info_to_cifs(data, smb2_data);
+ rc = SMB2_query_info(xid, tcon, cfid->fid.persistent_fid,
+ cfid->fid.volatile_fid, &data->fi);
}
close_cached_dir(cfid);
- goto out;
+ return rc;
}
cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
- ACL_NO_MODE, smb2_data, SMB2_OP_QUERY_INFO, cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
+ create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile,
+ err_iov, err_buftype);
if (rc == -EOPNOTSUPP) {
+ if (err_iov[0].iov_base && err_buftype[0] != CIFS_NO_BUFFER &&
+ ((struct smb2_hdr *)err_iov[0].iov_base)->Command == SMB2_CREATE &&
+ ((struct smb2_hdr *)err_iov[0].iov_base)->Status == STATUS_STOPPED_ON_SYMLINK) {
+ rc = smb2_parse_symlink_response(cifs_sb, err_iov, &data->symlink_target);
+ if (rc)
+ goto out;
+ }
*reparse = true;
create_options |= OPEN_REPARSE_POINT;
/* Failed on a symbolic link - query a reparse point info */
cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_QUERY_INFO, cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES,
+ FILE_OPEN, create_options, ACL_NO_MODE, data,
+ SMB2_OP_QUERY_INFO, cfile, NULL, NULL);
}
- if (rc)
- goto out;
- move_smb2_info_to_cifs(data, smb2_data);
out:
- kfree(smb2_data);
+ free_rsp_buf(err_buftype[0], err_iov[0].iov_base);
+ free_rsp_buf(err_buftype[1], err_iov[1].iov_base);
+ free_rsp_buf(err_buftype[2], err_iov[2].iov_base);
return rc;
}
-int
-smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- struct smb311_posix_qinfo *data, bool *adjust_tz, bool *reparse)
+int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
{
int rc;
__u32 create_options = 0;
struct cifsFileInfo *cfile;
- struct smb311_posix_qinfo *smb2_data;
+ struct kvec err_iov[3] = {};
+ int err_buftype[3] = {};
*adjust_tz = false;
*reparse = false;
- /* BB TODO: Make struct larger when add support for parsing owner SIDs */
- smb2_data = kzalloc(sizeof(struct smb311_posix_qinfo),
- GFP_KERNEL);
- if (smb2_data == NULL)
- return -ENOMEM;
-
/*
* BB TODO: Add support for using the cached root handle.
* Create SMB2_query_posix_info worker function to do non-compounded query
@@ -585,29 +587,32 @@ smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
*/
cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
- ACL_NO_MODE, smb2_data, SMB2_OP_POSIX_QUERY_INFO, cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES, FILE_OPEN,
+ create_options, ACL_NO_MODE, data, SMB2_OP_POSIX_QUERY_INFO, cfile,
+ err_iov, err_buftype);
if (rc == -EOPNOTSUPP) {
/* BB TODO: When support for special files added to Samba re-verify this path */
+ if (err_iov[0].iov_base && err_buftype[0] != CIFS_NO_BUFFER &&
+ ((struct smb2_hdr *)err_iov[0].iov_base)->Command == SMB2_CREATE &&
+ ((struct smb2_hdr *)err_iov[0].iov_base)->Status == STATUS_STOPPED_ON_SYMLINK) {
+ rc = smb2_parse_symlink_response(cifs_sb, err_iov, &data->symlink_target);
+ if (rc)
+ goto out;
+ }
*reparse = true;
create_options |= OPEN_REPARSE_POINT;
/* Failed on a symbolic link - query a reparse point info */
cifs_get_readable_path(tcon, full_path, &cfile);
- rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_POSIX_QUERY_INFO, cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, FILE_READ_ATTRIBUTES,
+ FILE_OPEN, create_options, ACL_NO_MODE, data,
+ SMB2_OP_POSIX_QUERY_INFO, cfile, NULL, NULL);
}
- if (rc)
- goto out;
-
- /* TODO: will need to allow for the 2 SIDs when add support for getting owner UID/GID */
- memcpy(data, smb2_data, sizeof(struct smb311_posix_qinfo));
out:
- kfree(smb2_data);
+ free_rsp_buf(err_buftype[0], err_iov[0].iov_base);
+ free_rsp_buf(err_buftype[1], err_iov[1].iov_base);
+ free_rsp_buf(err_buftype[2], err_iov[2].iov_base);
return rc;
}
@@ -619,7 +624,7 @@ smb2_mkdir(const unsigned int xid, struct inode *parent_inode, umode_t mode,
return smb2_compound_op(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
CREATE_NOT_FILE, mode, NULL, SMB2_OP_MKDIR,
- NULL);
+ NULL, NULL, NULL);
}
void
@@ -641,7 +646,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
CREATE_NOT_FILE, ACL_NO_MODE,
- &data, SMB2_OP_SET_INFO, cfile);
+ &data, SMB2_OP_SET_INFO, cfile, NULL, NULL);
if (tmprc == 0)
cifs_i->cifsAttrs = dosattrs;
}
@@ -652,7 +657,7 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
{
return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
CREATE_NOT_FILE, ACL_NO_MODE,
- NULL, SMB2_OP_RMDIR, NULL);
+ NULL, SMB2_OP_RMDIR, NULL, NULL, NULL);
}
int
@@ -661,7 +666,7 @@ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
{
return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
- ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL);
+ ACL_NO_MODE, NULL, SMB2_OP_DELETE, NULL, NULL, NULL);
}
static int
@@ -680,7 +685,7 @@ smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
}
rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
FILE_OPEN, 0, ACL_NO_MODE, smb2_to_name,
- command, cfile);
+ command, cfile, NULL, NULL);
smb2_rename_path:
kfree(smb2_to_name);
return rc;
@@ -720,7 +725,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
return smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
- &eof, SMB2_OP_SET_EOF, cfile);
+ &eof, SMB2_OP_SET_EOF, cfile, NULL, NULL);
}
int
@@ -746,7 +751,8 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN,
- 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile);
+ 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile,
+ NULL, NULL);
cifs_put_tlink(tlink);
return rc;
}
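For callers that opt into receiving the raw error responses, the contract documented above (arrays of at least 3, caller frees all entries) follows the pattern already visible in smb2_query_path_info(); a condensed sketch, with surrounding declarations assumed from that function:

    struct kvec err_iov[3] = {};
    int err_buftype[3] = {};
    int rc;

    rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
                          FILE_READ_ATTRIBUTES, FILE_OPEN, 0,
                          ACL_NO_MODE, data, SMB2_OP_QUERY_INFO,
                          cfile, err_iov, err_buftype);
    if (rc) {
            /* err_iov[0] now holds the CREATE error response, if any */
    }
    free_rsp_buf(err_buftype[0], err_iov[0].iov_base);
    free_rsp_buf(err_buftype[1], err_iov[1].iov_base);
    free_rsp_buf(err_buftype[2], err_iov[2].iov_base);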
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 7db5c09ecceb..a38720477966 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -248,7 +248,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
* Some windows servers (win2016) will pad also the final
* PDU in a compound to 8 bytes.
*/
- if (((calc_len + 7) & ~7) == len)
+ if (ALIGN(calc_len, 8) == len)
return 0;
/*
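The ALIGN() conversion above is behavior-preserving: for a power-of-two boundary the macro expands to the same mask arithmetic that was open-coded before. Simplified from include/linux/align.h:

    #define __ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))
    #define ALIGN(x, a)             __ALIGN_MASK(x, (typeof(x))(a) - 1)
    /* hence ALIGN(calc_len, 8) == ((calc_len + 7) & ~7) */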
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 5187250c5f66..17b25153cb68 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -550,7 +550,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
/* avoid spamming logs every 10 minutes, so log only in mount */
if ((ses->chan_max > 1) && in_mount)
cifs_dbg(VFS,
- "empty network interface list returned by server %s\n",
+ "multichannel not available\n"
+ "Empty network interface list returned by server %s\n",
ses->server->hostname);
rc = -EINVAL;
goto out;
@@ -800,7 +801,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
if (!rc) {
- if (cfid->is_valid) {
+ if (cfid->has_lease) {
close_cached_dir(cfid);
return 0;
}
@@ -830,33 +831,25 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
-static int
-smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- u64 *uniqueid, FILE_ALL_INFO *data)
+static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ u64 *uniqueid, struct cifs_open_info_data *data)
{
- *uniqueid = le64_to_cpu(data->IndexNumber);
+ *uniqueid = le64_to_cpu(data->fi.IndexNumber);
return 0;
}
-static int
-smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_fid *fid, FILE_ALL_INFO *data)
+static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
{
- int rc;
- struct smb2_file_all_info *smb2_data;
-
- smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
- GFP_KERNEL);
- if (smb2_data == NULL)
- return -ENOMEM;
+ struct cifs_fid *fid = &cfile->fid;
- rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
- smb2_data);
- if (!rc)
- move_smb2_info_to_cifs(data, smb2_data);
- kfree(smb2_data);
- return rc;
+ if (cfile->symlink_target) {
+ data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
+ if (!data->symlink_target)
+ return -ENOMEM;
+ }
+ return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
}
#ifdef CONFIG_CIFS_XATTR
@@ -2025,9 +2018,10 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
static int
smb3_notify(const unsigned int xid, struct file *pfile,
- void __user *ioc_buf)
+ void __user *ioc_buf, bool return_changes)
{
- struct smb3_notify notify;
+ struct smb3_notify_info notify;
+ struct smb3_notify_info __user *pnotify_buf;
struct dentry *dentry = pfile->f_path.dentry;
struct inode *inode = file_inode(pfile);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -2035,10 +2029,12 @@ smb3_notify(const unsigned int xid, struct file *pfile,
struct cifs_fid fid;
struct cifs_tcon *tcon;
const unsigned char *path;
+ char *returned_ioctl_info = NULL;
void *page = alloc_dentry_path();
__le16 *utf16_path = NULL;
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
int rc = 0;
+ __u32 ret_len = 0;
path = build_path_from_dentry(dentry, page);
if (IS_ERR(path)) {
@@ -2052,9 +2048,17 @@ smb3_notify(const unsigned int xid, struct file *pfile,
goto notify_exit;
}
- if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
- rc = -EFAULT;
- goto notify_exit;
+ if (return_changes) {
+ if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) {
+ rc = -EFAULT;
+ goto notify_exit;
+ }
+ } else {
+ if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
+ rc = -EFAULT;
+ goto notify_exit;
+ }
+ notify.data_len = 0;
}
tcon = cifs_sb_master_tcon(cifs_sb);
@@ -2071,12 +2075,22 @@ smb3_notify(const unsigned int xid, struct file *pfile,
goto notify_exit;
rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
- notify.watch_tree, notify.completion_filter);
+ notify.watch_tree, notify.completion_filter,
+ notify.data_len, &returned_ioctl_info, &ret_len);
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
-
+ if (return_changes && (ret_len > 0) && (notify.data_len > 0)) {
+ if (ret_len > notify.data_len)
+ ret_len = notify.data_len;
+ pnotify_buf = (struct smb3_notify_info __user *)ioc_buf;
+ if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len))
+ rc = -EFAULT;
+ else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len)))
+ rc = -EFAULT;
+ }
+ kfree(returned_ioctl_info);
notify_exit:
free_dentry_path(page);
kfree(utf16_path);
@@ -2827,9 +2841,6 @@ parse_reparse_point(struct reparse_data_buffer *buf,
}
}
-#define SMB2_SYMLINK_STRUCT_SIZE \
- (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
-
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
@@ -2841,13 +2852,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
struct kvec err_iov = {NULL, 0};
- struct smb2_err_rsp *err_buf = NULL;
- struct smb2_symlink_err_rsp *symlink;
struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
- unsigned int sub_len;
- unsigned int sub_offset;
- unsigned int print_len;
- unsigned int print_offset;
int flags = CIFS_CP_CREATE_CLOSE_OP;
struct smb_rqst rqst[3];
int resp_buftype[3];
@@ -2964,47 +2969,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
goto querty_exit;
}
- err_buf = err_iov.iov_base;
- if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
- err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
- rc = -EINVAL;
- goto querty_exit;
- }
-
- symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
- if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
- le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
- rc = -EINVAL;
- goto querty_exit;
- }
-
- /* open must fail on symlink - reset rc */
- rc = 0;
- sub_len = le16_to_cpu(symlink->SubstituteNameLength);
- sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
- print_len = le16_to_cpu(symlink->PrintNameLength);
- print_offset = le16_to_cpu(symlink->PrintNameOffset);
-
- if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
- rc = -EINVAL;
- goto querty_exit;
- }
-
- if (err_iov.iov_len <
- SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
- rc = -EINVAL;
- goto querty_exit;
- }
-
- *target_path = cifs_strndup_from_utf16(
- (char *)symlink->PathBuffer + sub_offset,
- sub_len, true, cifs_sb->local_nls);
- if (!(*target_path)) {
- rc = -ENOMEM;
- goto querty_exit;
- }
- convert_delimiter(*target_path, '/');
- cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
+ rc = smb2_parse_symlink_response(cifs_sb, &err_iov, target_path);
querty_exit:
cifs_dbg(FYI, "query symlink rc %d\n", rc);
@@ -5114,7 +5079,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
int rc = -EPERM;
- FILE_ALL_INFO *buf = NULL;
+ struct cifs_open_info_data buf = {};
struct cifs_io_parms io_parms = {0};
__u32 oplock = 0;
struct cifs_fid fid;
@@ -5130,7 +5095,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
* and was used by default in earlier versions of Windows
*/
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
- goto out;
+ return rc;
/*
* TODO: Add ability to create instead via reparse point. Windows (e.g.
@@ -5139,16 +5104,10 @@ smb2_make_node(unsigned int xid, struct inode *inode,
*/
if (!S_ISCHR(mode) && !S_ISBLK(mode))
- goto out;
+ return rc;
cifs_dbg(FYI, "sfu compat create special file\n");
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (buf == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
oparms.desired_access = GENERIC_WRITE;
@@ -5163,21 +5122,21 @@ smb2_make_node(unsigned int xid, struct inode *inode,
oplock = REQ_OPLOCK;
else
oplock = 0;
- rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
+ rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
if (rc)
- goto out;
+ return rc;
/*
* BB Do not bother to decode buf since no local inode yet to put
* timestamps in, but we can reuse it safely.
*/
- pdev = (struct win_dev *)buf;
+ pdev = (struct win_dev *)&buf.fi;
io_parms.pid = current->tgid;
io_parms.tcon = tcon;
io_parms.offset = 0;
io_parms.length = sizeof(struct win_dev);
- iov[1].iov_base = buf;
+ iov[1].iov_base = &buf.fi;
iov[1].iov_len = sizeof(struct win_dev);
if (S_ISCHR(mode)) {
memcpy(pdev->type, "IntxCHR", 8);
@@ -5196,8 +5155,8 @@ smb2_make_node(unsigned int xid, struct inode *inode,
d_drop(dentry);
/* FIXME: add code here to set EAs */
-out:
- kfree(buf);
+
+ cifs_free_open_info(&buf);
return rc;
}
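The copy-back step added to smb3_notify() clamps the server-returned length to the user-supplied buffer size before writing anything back to user memory. An equivalent formulation of the same clamp using min_t(), shown only to make the bound explicit:

    if (return_changes && ret_len > 0 && notify.data_len > 0) {
            ret_len = min_t(u32, ret_len, notify.data_len);
            if (copy_to_user(pnotify_buf->notify_data,
                             returned_ioctl_info, ret_len) ||
                copy_to_user(&pnotify_buf->data_len, &ret_len,
                             sizeof(ret_len)))
                    rc = -EFAULT;
    }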
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b3c4d2e54eaa..a2384509ea84 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -466,15 +466,14 @@ build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
/*
* Context Data length must be rounded to multiple of 8 for some servers
*/
- pneg_ctxt->DataLength = cpu_to_le16(DIV_ROUND_UP(
- sizeof(struct smb2_signing_capabilities) -
- sizeof(struct smb2_neg_context) +
- (num_algs * 2 /* sizeof u16 */), 8) * 8);
+ pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
+ sizeof(struct smb2_neg_context) +
+ (num_algs * sizeof(u16)), 8));
pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
- ctxt_len += 2 /* sizeof le16 */ * num_algs;
- ctxt_len = DIV_ROUND_UP(ctxt_len, 8) * 8;
+ ctxt_len += sizeof(__le16) * num_algs;
+ ctxt_len = ALIGN(ctxt_len, 8);
return ctxt_len;
/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
}
@@ -511,8 +510,7 @@ build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
/* copy up to max of first 100 bytes of server name to NetName field */
pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
/* context size is DataLength + minimal smb2_neg_context */
- return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) +
- sizeof(struct smb2_neg_context), 8) * 8;
+ return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
}
static void
@@ -557,18 +555,18 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
* round up total_len of fixed part of SMB3 negotiate request to 8
* byte boundary before adding negotiate contexts
*/
- *total_len = roundup(*total_len, 8);
+ *total_len = ALIGN(*total_len, 8);
pneg_ctxt = (*total_len) + (char *)req;
req->NegotiateContextOffset = cpu_to_le32(*total_len);
build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
- ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
+ ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
- ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
+ ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
@@ -595,9 +593,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
if (server->compress_algorithm) {
build_compression_ctxt((struct smb2_compression_capabilities_context *)
pneg_ctxt);
- ctxt_len = DIV_ROUND_UP(
- sizeof(struct smb2_compression_capabilities_context),
- 8) * 8;
+ ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
*total_len += ctxt_len;
pneg_ctxt += ctxt_len;
neg_context_count++;
@@ -780,7 +776,7 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
if (rc)
break;
/* offsets must be 8 byte aligned */
- clen = (clen + 7) & ~0x7;
+ clen = ALIGN(clen, 8);
offset += clen + sizeof(struct smb2_neg_context);
len_of_ctxts -= clen;
}
@@ -2424,9 +2420,9 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
unsigned int acelen, acl_size, ace_count;
unsigned int owner_offset = 0;
unsigned int group_offset = 0;
- struct smb3_acl acl;
+ struct smb3_acl acl = {};
- *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
+ *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
if (set_owner) {
/* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
@@ -2497,10 +2493,11 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
acl.AclSize = cpu_to_le16(acl_size);
acl.AceCount = cpu_to_le16(ace_count);
+ /* acl.Sbz1 and acl.Sbz2 must be zero (MBZ), so they are left as zero-initialized above */
memcpy(aclptr, &acl, sizeof(struct smb3_acl));
buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
- *len = roundup(ptr - (__u8 *)buf, 8);
+ *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
return buf;
}
@@ -2594,7 +2591,7 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
* final path needs to be 8-byte aligned as specified in
* MS-SMB2 2.2.13 SMB2 CREATE Request.
*/
- *out_size = roundup(*out_len * sizeof(__le16), 8);
+ *out_size = round_up(*out_len * sizeof(__le16), 8);
*out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
if (!*out_path)
return -ENOMEM;
@@ -2839,9 +2836,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
/* MUST set path len (NameLength) to 0 opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
- copy_size = uni_path_len;
- if (copy_size % 8 != 0)
- copy_size = roundup(copy_size, 8);
+ copy_size = round_up(uni_path_len, 8);
copy_path = kzalloc(copy_size, GFP_KERNEL);
if (!copy_path)
return -ENOMEM;
@@ -3485,7 +3480,7 @@ smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
if (rc)
return rc;
- memcpy(data, begin_of_buf, buffer_length);
+ memcpy(data, begin_of_buf, minbufsize);
return 0;
}
@@ -3609,7 +3604,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength),
- &rsp_iov, min_len, *data);
+ &rsp_iov, dlen ? *dlen : min_len, *data);
if (rc && allocated) {
kfree(*data);
*data = NULL;
@@ -3715,11 +3710,13 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
int
SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, bool watch_tree,
- u32 completion_filter)
+ u32 completion_filter, u32 max_out_data_len, char **out_data,
+ u32 *plen /* returned data len */)
{
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct smb_rqst rqst;
+ struct smb2_change_notify_rsp *smb_rsp;
struct kvec iov[1];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
@@ -3735,6 +3732,9 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
memset(&rqst, 0, sizeof(struct smb_rqst));
memset(&iov, 0, sizeof(iov));
+ if (plen)
+ *plen = 0;
+
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
@@ -3753,9 +3753,28 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
(u8)watch_tree, completion_filter, rc);
- } else
+ } else {
trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
- ses->Suid, (u8)watch_tree, completion_filter);
+ ses->Suid, (u8)watch_tree, completion_filter);
+ /* validate that notify information is plausible */
+ if ((rsp_iov.iov_base == NULL) ||
+ (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp)))
+ goto cnotify_exit;
+
+ smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
+
+ smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
+ le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov,
+ sizeof(struct file_notify_information));
+
+ *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
+ le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
+ if (*out_data == NULL) {
+ rc = -ENOMEM;
+ goto cnotify_exit;
+ } else
+ *plen = le32_to_cpu(smb_rsp->OutputBufferLength);
+ }
cnotify_exit:
if (rqst.rq_iov)
@@ -4103,7 +4122,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
if (request_type & CHAINED_REQUEST) {
if (!(request_type & END_OF_CHAIN)) {
/* next 8-byte aligned request */
- *total_len = DIV_ROUND_UP(*total_len, 8) * 8;
+ *total_len = ALIGN(*total_len, 8);
shdr->NextCommand = cpu_to_le32(*total_len);
} else /* END_OF_CHAIN */
shdr->NextCommand = 0;
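All of the rounding conversions in this file target the constant 8, where the helpers compute the same value; they differ in what they require of the boundary. Simplified from include/linux/math.h:

    #define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))        /* any y */
    #define round_up(x, y)  ((((x) - 1) | ((typeof(x))(y) - 1)) + 1) /* y must be a power of 2 */
    /*
     * DIV_ROUND_UP(x, 8) * 8, roundup(x, 8), round_up(x, 8) and ALIGN(x, 8)
     * all agree; round_up()/ALIGN() avoid the division.
     */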
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f57881b8464f..1237bb86e93a 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -56,6 +56,9 @@ struct smb2_rdma_crypto_transform {
#define COMPOUND_FID 0xFFFFFFFFFFFFFFFFULL
+#define SMB2_SYMLINK_STRUCT_SIZE \
+ (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
+
#define SYMLINK_ERROR_TAG 0x4c4d5953
struct smb2_symlink_err_rsp {
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 3f740f24b96a..be21b5d26f67 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -53,16 +53,12 @@ extern bool smb2_is_valid_oplock_break(char *buffer,
struct TCP_Server_Info *srv);
extern int smb3_handle_read_data(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
-
-extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
- struct smb2_file_all_info *src);
extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *path,
__u32 *reparse_tag);
-extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb,
- const char *full_path, FILE_ALL_INFO *data,
- bool *adjust_tz, bool *symlink);
+int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
const char *full_path, __u64 size,
struct cifs_sb_info *cifs_sb, bool set_alloc);
@@ -95,9 +91,9 @@ extern int smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
unsigned int *pbytes_read);
-extern int smb2_open_file(const unsigned int xid,
- struct cifs_open_parms *oparms,
- __u32 *oplock, FILE_ALL_INFO *buf);
+int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, char **path);
+int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
+ void *buf);
extern int smb2_unlock_range(struct cifsFileInfo *cfile,
struct file_lock *flock, const unsigned int xid);
extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
@@ -148,7 +144,8 @@ extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
extern void SMB2_ioctl_free(struct smb_rqst *rqst);
extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, bool watch_tree,
- u32 completion_filter);
+ u32 completion_filter, u32 max_out_data_len,
+ char **out_data, u32 *plen /* returned data len */);
extern int __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
@@ -278,9 +275,9 @@ extern int smb2_query_info_compound(const unsigned int xid,
struct kvec *rsp, int *buftype,
struct cifs_sb_info *cifs_sb);
/* query path info from the server using SMB311 POSIX extensions */
-extern int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *sb, const char *path, struct smb311_posix_qinfo *qinf,
- bool *adjust_tx, bool *symlink);
+int smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse);
int posix_info_parse(const void *beg, const void *end,
struct smb2_posix_info_parsed *out);
int posix_info_sid_size(const void *beg, const void *end);
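With the extended SMB2_change_notify() prototype, a caller that wants the change data back passes an output pointer and receives an allocation it must free. A hedged usage sketch based on smb3_notify() above; watch_tree, completion_filter and max_len stand in for whatever the caller already has:

    char *out_data = NULL;
    u32 ret_len = 0;

    rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                            watch_tree, completion_filter,
                            max_len, &out_data, &ret_len);
    if (!rc && ret_len) {
            /* consume up to ret_len bytes of file_notify_information */
    }
    kfree(out_data);    /* allocated with kmemdup() in SMB2_change_notify() */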
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 998cd26a1b3b..fe05bc51f9f2 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -590,14 +590,17 @@ struct erofs_fscache *erofs_domain_register_cookie(struct super_block *sb,
struct super_block *psb = erofs_pseudo_mnt->mnt_sb;
mutex_lock(&erofs_domain_cookies_lock);
+ spin_lock(&psb->s_inode_list_lock);
list_for_each_entry(inode, &psb->s_inodes, i_sb_list) {
ctx = inode->i_private;
if (!ctx || ctx->domain != domain || strcmp(ctx->name, name))
continue;
igrab(inode);
+ spin_unlock(&psb->s_inode_list_lock);
mutex_unlock(&erofs_domain_cookies_lock);
return ctx;
}
+ spin_unlock(&psb->s_inode_list_lock);
ctx = erofs_fscache_domain_init_cookie(sb, name, need_inode);
mutex_unlock(&erofs_domain_cookies_lock);
return ctx;
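The locking fix above follows the usual VFS rule for sb->s_inodes: the walk must hold s_inode_list_lock, and the inode must be pinned (igrab()) before the lock is dropped if it will be used afterwards. In outline, with match() standing in for the ctx/domain/name comparison:

    spin_lock(&sb->s_inode_list_lock);
    list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
            if (!match(inode))
                    continue;
            igrab(inode);   /* pin before dropping the list lock */
            spin_unlock(&sb->s_inode_list_lock);
            return inode;
    }
    spin_unlock(&sb->s_inode_list_lock);
    return NULL;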
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 559380a535af..c7f24fc7efd5 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -813,15 +813,14 @@ retry:
++spiltted;
if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
fe->pcl->multibases = true;
-
- if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
- !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
- fe->pcl->length == map->m_llen)
- fe->pcl->partial = false;
if (fe->pcl->length < offset + end - map->m_la) {
fe->pcl->length = offset + end - map->m_la;
fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
}
+ if ((map->m_flags & EROFS_MAP_FULL_MAPPED) &&
+ !(map->m_flags & EROFS_MAP_PARTIAL_REF) &&
+ fe->pcl->length == map->m_llen)
+ fe->pcl->partial = false;
next_part:
/* shorten the remaining extent to update progress */
map->m_llen = offset + cur - map->m_la;
@@ -888,15 +887,13 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) {
unsigned int pgnr;
- struct page *oldpage;
pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
DBG_BUGON(pgnr >= be->nr_pages);
- oldpage = be->decompressed_pages[pgnr];
- be->decompressed_pages[pgnr] = bvec->page;
-
- if (!oldpage)
+ if (!be->decompressed_pages[pgnr]) {
+ be->decompressed_pages[pgnr] = bvec->page;
return;
+ }
}
/* (cold path) one pcluster is requested multiple times */
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index e7f04c4fbb81..d98c95212985 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -126,10 +126,10 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
}
/*
- * bit 31: I/O error occurred on this page
- * bit 0 - 30: remaining parts to complete this page
+ * bit 30: I/O error occurred on this page
+ * bit 0 - 29: remaining parts to complete this page
*/
-#define Z_EROFS_PAGE_EIO (1 << 31)
+#define Z_EROFS_PAGE_EIO (1 << 30)
static inline void z_erofs_onlinepage_init(struct page *page)
{
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 44c27ef39c43..0bb66927e3d0 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -57,8 +57,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
vi->xattr_isize, 8);
- kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos),
- EROFS_KMAP_ATOMIC);
+ kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
if (IS_ERR(kaddr)) {
err = PTR_ERR(kaddr);
goto out_unlock;
@@ -73,7 +72,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
vi->z_tailextent_headlcn = 0;
- goto unmap_done;
+ goto done;
}
vi->z_advise = le16_to_cpu(h->h_advise);
vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
@@ -85,7 +84,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
err = -EOPNOTSUPP;
- goto unmap_done;
+ goto out_put_metabuf;
}
vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
@@ -95,7 +94,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
- goto unmap_done;
+ goto out_put_metabuf;
}
if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
@@ -103,12 +102,8 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
vi->nid);
err = -EFSCORRUPTED;
- goto unmap_done;
+ goto out_put_metabuf;
}
-unmap_done:
- erofs_put_metabuf(&buf);
- if (err)
- goto out_unlock;
if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
struct erofs_map_blocks map = {
@@ -127,7 +122,7 @@ unmap_done:
err = -EFSCORRUPTED;
}
if (err < 0)
- goto out_unlock;
+ goto out_put_metabuf;
}
if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
@@ -141,11 +136,14 @@ unmap_done:
EROFS_GET_BLOCKS_FINDTAIL);
erofs_put_metabuf(&map.buf);
if (err < 0)
- goto out_unlock;
+ goto out_put_metabuf;
}
+done:
/* paired with smp_mb() at the beginning of the function */
smp_mb();
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+out_put_metabuf:
+ erofs_put_metabuf(&buf);
out_unlock:
clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
return err;
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index a795437b86d0..5590a1e83126 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -552,7 +552,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
if (info->attr & ATTR_SUBDIR) { /* directory */
inode->i_generation &= ~1;
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 998dd2ac8008..f4944c4dee60 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -277,8 +277,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
int best_ndir = inodes_per_group;
int best_group = -1;
- group = prandom_u32();
- parent_group = (unsigned)group % ngroups;
+ parent_group = prandom_u32_max(ngroups);
for (i = 0; i < ngroups; i++) {
group = (parent_group + i) % ngroups;
desc = ext2_get_group_desc (sb, group, NULL);
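prandom_u32_max(n) yields a value in [0, n) by scaling rather than taking a remainder, so the conversions in this series are drop-in replacements for the open-coded `prandom_u32() % n`. Its definition in include/linux/prandom.h at the time of this merge is essentially:

    static inline u32 prandom_u32_max(u32 ep_ro)
    {
            /* map a full 32-bit random value into [0, ep_ro) */
            return (u32)(((u64)prandom_u32() * ep_ro) >> 32);
    }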
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 208b87ce8858..e9bc46684106 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -463,10 +463,9 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
hinfo.hash_version = DX_HASH_HALF_MD4;
hinfo.seed = sbi->s_hash_seed;
ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
- grp = hinfo.hash;
+ parent_group = hinfo.hash % ngroups;
} else
- grp = prandom_u32();
- parent_group = (unsigned)grp % ngroups;
+ parent_group = prandom_u32_max(ngroups);
for (i = 0; i < ngroups; i++) {
g = (parent_group + i) % ngroups;
get_orlov_stats(sb, g, flex_size, &stats);
@@ -1280,7 +1279,7 @@ got:
EXT4_GROUP_INFO_IBITMAP_CORRUPT);
goto out;
}
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
/* Precompute checksum seed for inode metadata */
if (ext4_has_metadata_csum(sb)) {
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 4d49c5cfb690..ded535535b27 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -454,8 +454,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
inode->i_ctime = inode_bl->i_ctime = current_time(inode);
inode_inc_iversion(inode);
- inode->i_generation = prandom_u32();
- inode_bl->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
+ inode_bl->i_generation = get_random_u32();
ext4_reset_inode_seed(inode);
ext4_reset_inode_seed(inode_bl);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 9af68a7ecdcf..588cb09c5291 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -265,7 +265,7 @@ static unsigned int mmp_new_seq(void)
u32 new_seq;
do {
- new_seq = prandom_u32();
+ new_seq = get_random_u32();
} while (new_seq > EXT4_MMP_SEQ_MAX);
return new_seq;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d733db8a0b02..989365b878a6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3782,8 +3782,7 @@ cont_thread:
}
if (!progress) {
elr->lr_next_sched = jiffies +
- (prandom_u32()
- % (EXT4_DEF_LI_MAX_START_DELAY * HZ));
+ prandom_u32_max(EXT4_DEF_LI_MAX_START_DELAY * HZ);
}
if (time_before(elr->lr_next_sched, next_wakeup))
next_wakeup = elr->lr_next_sched;
@@ -3930,8 +3929,8 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
* spread the inode table initialization requests
* better.
*/
- elr->lr_next_sched = jiffies + (prandom_u32() %
- (EXT4_DEF_LI_MAX_START_DELAY * HZ));
+ elr->lr_next_sched = jiffies + prandom_u32_max(
+ EXT4_DEF_LI_MAX_START_DELAY * HZ);
return elr;
}
diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c
index 20cadfb740dc..3c640bd7ecae 100644
--- a/fs/ext4/verity.c
+++ b/fs/ext4/verity.c
@@ -363,13 +363,14 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
pgoff_t index,
unsigned long num_ra_pages)
{
- DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
struct page *page;
index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;
page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
if (!page || !PageUptodate(page)) {
+ DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
+
if (page)
put_page(page);
else if (num_ra_pages > 1)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index d36bcb23ccfe..4546e01b2ee0 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -282,7 +282,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
/* let's select beginning hot/small space first in no_heap mode*/
if (f2fs_need_rand_seg(sbi))
- p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+ p->offset = prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
else if (test_opt(sbi, NOHEAP) &&
(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
p->offset = 0;
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index d5065a5af1f8..a389772fd212 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -50,7 +50,7 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
F2FS_I(inode)->i_crtime = inode->i_mtime;
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
if (S_ISDIR(inode->i_mode))
F2FS_I(inode)->i_current_depth = 1;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 289bcb7ca300..acf3d3fa4363 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2534,7 +2534,7 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
sanity_check_seg_type(sbi, seg_type);
if (f2fs_need_rand_seg(sbi))
- return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+ return prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
/* if segs_per_sec is large than 1, we need to keep original policy. */
if (__is_large_section(sbi))
@@ -2588,7 +2588,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
curseg->alloc_type = LFS;
if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
curseg->fragment_remained_chunk =
- prandom_u32() % sbi->max_fragment_chunk + 1;
+ prandom_u32_max(sbi->max_fragment_chunk) + 1;
}
static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2625,9 +2625,9 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
/* To allocate block chunks in different sizes, use random number */
if (--seg->fragment_remained_chunk <= 0) {
seg->fragment_remained_chunk =
- prandom_u32() % sbi->max_fragment_chunk + 1;
+ prandom_u32_max(sbi->max_fragment_chunk) + 1;
seg->next_blkoff +=
- prandom_u32() % sbi->max_fragment_hole + 1;
+ prandom_u32_max(sbi->max_fragment_hole) + 1;
}
}
}
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index f0805e51b3fe..c352fff88a5e 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -258,13 +258,14 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
pgoff_t index,
unsigned long num_ra_pages)
{
- DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
struct page *page;
index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
if (!page || !PageUptodate(page)) {
+ DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
+
if (page)
put_page(page);
else if (num_ra_pages > 1)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index a38238d75c08..1cbcc4608dc7 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -523,7 +523,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
inode_inc_iversion(inode);
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
inode->i_generation &= ~1;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 07881b76d42f..277468783fee 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -103,7 +103,7 @@ static char *__dentry_name(struct dentry *dentry, char *name)
*/
BUG_ON(p + strlen(p) + 1 != name + PATH_MAX);
- strlcpy(name, root, PATH_MAX);
+ strscpy(name, root, PATH_MAX);
if (len > p - name) {
__putname(name);
return NULL;
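strscpy() is the preferred replacement for strlcpy() here: it returns the number of bytes copied, or -E2BIG when the source was truncated, rather than strlen(src), so the result can be checked without re-scanning the source. A sketch of the checked form (hostfs ignores the return value, as above):

    ssize_t n = strscpy(name, root, PATH_MAX);

    if (n < 0) {
            /* -E2BIG: root did not fit in PATH_MAX bytes */
    }
    /* on success, n is the copied length, excluding the NUL */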
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 198d7abf34e4..4e718500a00c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4375,8 +4375,8 @@ nfsd4_init_leases_net(struct nfsd_net *nn)
nn->nfsd4_grace = 90;
nn->somebody_reclaimed = false;
nn->track_reclaim_completes = false;
- nn->clverifier_counter = prandom_u32();
- nn->clientid_base = prandom_u32();
+ nn->clverifier_counter = get_random_u32();
+ nn->clientid_base = get_random_u32();
nn->clientid_counter = nn->clientid_base + 1;
nn->s2s_cp_cl_id = nn->clientid_counter++;
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index e7c494005122..0d611a6c5511 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -3819,7 +3819,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
}
log_init_pg_hdr(log, page_size, page_size, 1, 1);
- log_create(log, l_size, 0, get_random_int(), false, false);
+ log_create(log, l_size, 0, get_random_u32(), false, false);
log->ra = ra;
@@ -3893,7 +3893,7 @@ check_restart_area:
/* Do some checks based on whether we have a valid log page. */
if (!rst_info.valid_page) {
- open_log_count = get_random_int();
+ open_log_count = get_random_u32();
goto init_log_instance;
}
open_log_count = le32_to_cpu(ra2->open_log_count);
@@ -4044,7 +4044,7 @@ find_oldest:
memcpy(ra->clients, Add2Ptr(ra2, t16),
le16_to_cpu(ra2->ra_len) - t16);
- log->current_openlog_count = get_random_int();
+ log->current_openlog_count = get_random_u32();
ra->open_log_count = cpu_to_le32(log->current_openlog_count);
log->ra_size = offsetof(struct RESTART_AREA, clients) +
sizeof(struct CLIENT_REC);
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
index c57b46a352d8..3125e76376ee 100644
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -24,6 +24,17 @@ static bool ubifs_crypt_empty_dir(struct inode *inode)
return ubifs_check_dir_empty(inode) == 0;
}
+/**
+ * ubifs_encrypt - Encrypt data.
+ * @inode: inode which refers to the data node
+ * @dn: data node to encrypt
+ * @in_len: length of the (possibly compressed) data to encrypt
+ * @out_len: allocated memory size for the data area of @dn
+ * @block: logical block number of the block
+ *
+ * This function encrypts the possibly-compressed data in the data node.
+ * The encrypted data length is stored in @out_len.
+ */
int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
unsigned int in_len, unsigned int *out_len, int block)
{
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index fc718f6178f2..3f128b9fdfbb 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2467,7 +2467,7 @@ error_dump:
static inline int chance(unsigned int n, unsigned int out_of)
{
- return !!((prandom_u32() % out_of) + 1 <= n);
+ return !!(prandom_u32_max(out_of) + 1 <= n);
}
@@ -2485,13 +2485,13 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
if (chance(1, 2)) {
d->pc_delay = 1;
/* Fail within 1 minute */
- delay = prandom_u32() % 60000;
+ delay = prandom_u32_max(60000);
d->pc_timeout = jiffies;
d->pc_timeout += msecs_to_jiffies(delay);
ubifs_warn(c, "failing after %lums", delay);
} else {
d->pc_delay = 2;
- delay = prandom_u32() % 10000;
+ delay = prandom_u32_max(10000);
/* Fail within 10000 operations */
d->pc_cnt_max = delay;
ubifs_warn(c, "failing after %lu calls", delay);
@@ -2571,7 +2571,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
unsigned int from, to, ffs = chance(1, 2);
unsigned char *p = (void *)buf;
- from = prandom_u32() % len;
+ from = prandom_u32_max(len);
/* Corruption span max to end of write unit */
to = min(len, ALIGN(from + 1, c->max_write_size));
@@ -2581,7 +2581,7 @@ static int corrupt_data(const struct ubifs_info *c, const void *buf,
if (ffs)
memset(p + from, 0xFF, to - from);
else
- prandom_bytes(p + from, to - from);
+ get_random_bytes(p + from, to - from);
return to;
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index f59acd6a3615..0f29cf201136 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -68,13 +68,14 @@ static int inherit_flags(const struct inode *dir, umode_t mode)
* @c: UBIFS file-system description object
* @dir: parent directory inode
* @mode: inode mode flags
+ * @is_xattr: whether the inode is an xattr inode
*
* This function finds an unused inode number, allocates new inode and
* initializes it. Returns new inode in case of success and an error code in
* case of failure.
*/
struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
- umode_t mode)
+ umode_t mode, bool is_xattr)
{
int err;
struct inode *inode;
@@ -99,10 +100,12 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
current_time(inode);
inode->i_mapping->nrpages = 0;
- err = fscrypt_prepare_new_inode(dir, inode, &encrypted);
- if (err) {
- ubifs_err(c, "fscrypt_prepare_new_inode failed: %i", err);
- goto out_iput;
+ if (!is_xattr) {
+ err = fscrypt_prepare_new_inode(dir, inode, &encrypted);
+ if (err) {
+ ubifs_err(c, "fscrypt_prepare_new_inode failed: %i", err);
+ goto out_iput;
+ }
}
switch (mode & S_IFMT) {
@@ -309,7 +312,7 @@ static int ubifs_create(struct user_namespace *mnt_userns, struct inode *dir,
sz_change = CALC_DENT_SIZE(fname_len(&nm));
- inode = ubifs_new_inode(c, dir, mode);
+ inode = ubifs_new_inode(c, dir, mode, false);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fname;
@@ -370,7 +373,7 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
if (err)
return ERR_PTR(err);
- inode = ubifs_new_inode(c, dir, mode);
+ inode = ubifs_new_inode(c, dir, mode, false);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_free;
@@ -463,7 +466,7 @@ static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
return err;
}
- inode = ubifs_new_inode(c, dir, mode);
+ inode = ubifs_new_inode(c, dir, mode, false);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_budg;
@@ -873,7 +876,7 @@ out_fname:
}
/**
- * check_dir_empty - check if a directory is empty or not.
+ * ubifs_check_dir_empty - check if a directory is empty or not.
* @dir: VFS inode object of the directory to check
*
* This function checks if directory @dir is empty. Returns zero if the
@@ -1005,7 +1008,7 @@ static int ubifs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
sz_change = CALC_DENT_SIZE(fname_len(&nm));
- inode = ubifs_new_inode(c, dir, S_IFDIR | mode);
+ inode = ubifs_new_inode(c, dir, S_IFDIR | mode, false);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fname;
@@ -1092,7 +1095,7 @@ static int ubifs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
sz_change = CALC_DENT_SIZE(fname_len(&nm));
- inode = ubifs_new_inode(c, dir, mode);
+ inode = ubifs_new_inode(c, dir, mode, false);
if (IS_ERR(inode)) {
kfree(dev);
err = PTR_ERR(inode);
@@ -1174,7 +1177,7 @@ static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
sz_change = CALC_DENT_SIZE(fname_len(&nm));
- inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO);
+ inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO, false);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_fname;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 75dab0ae3939..d02509920baf 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -503,7 +503,7 @@ static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
if (c->double_hash)
- dent->cookie = (__force __le32) prandom_u32();
+ dent->cookie = (__force __le32) get_random_u32();
else
dent->cookie = 0;
}
@@ -1472,23 +1472,25 @@ out_free:
* @block: data block number
* @dn: data node to re-compress
* @new_len: new length
+ * @dn_size: size of the data node @dn in memory
*
* This function is used when an inode is truncated and the last data node of
* the inode has to be re-compressed/encrypted and re-written.
*/
static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
unsigned int block, struct ubifs_data_node *dn,
- int *new_len)
+ int *new_len, int dn_size)
{
void *buf;
- int err, dlen, compr_type, out_len, old_dlen;
+ int err, dlen, compr_type, out_len, data_size;
out_len = le32_to_cpu(dn->size);
buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
if (!buf)
return -ENOMEM;
- dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+ dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+ data_size = dn_size - UBIFS_DATA_NODE_SZ;
compr_type = le16_to_cpu(dn->compr_type);
if (IS_ENCRYPTED(inode)) {
@@ -1508,11 +1510,11 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
}
if (IS_ENCRYPTED(inode)) {
- err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
+ err = ubifs_encrypt(inode, dn, out_len, &data_size, block);
if (err)
goto out;
- out_len = old_dlen;
+ out_len = data_size;
} else {
dn->compr_size = 0;
}
@@ -1550,6 +1552,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
struct ubifs_trun_node *trun;
struct ubifs_data_node *dn;
int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
+ int dn_size;
struct ubifs_inode *ui = ubifs_inode(inode);
ino_t inum = inode->i_ino;
unsigned int blk;
@@ -1562,10 +1565,13 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
ubifs_assert(c, S_ISREG(inode->i_mode));
ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
- sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
- UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
+ dn_size = COMPRESSED_DATA_NODE_BUF_SZ;
- sz += ubifs_auth_node_sz(c);
+ if (IS_ENCRYPTED(inode))
+ dn_size += UBIFS_CIPHER_BLOCK_SIZE;
+
+ sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
+ dn_size + ubifs_auth_node_sz(c);
ino = kmalloc(sz, GFP_NOFS);
if (!ino)
@@ -1596,15 +1602,15 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
ubifs_err(c, "bad data node (block %u, inode %lu)",
blk, inode->i_ino);
- ubifs_dump_node(c, dn, sz - UBIFS_INO_NODE_SZ -
- UBIFS_TRUN_NODE_SZ);
+ ubifs_dump_node(c, dn, dn_size);
goto out_free;
}
if (dn_len <= dlen)
dlen = 0; /* Nothing to do */
else {
- err = truncate_data_node(c, inode, blk, dn, &dlen);
+ err = truncate_data_node(c, inode, blk, dn,
+ &dlen, dn_size);
if (err)
goto out_free;
}
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index d76a19e460cd..cfbc31f709f4 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -1970,28 +1970,28 @@ static int dbg_populate_lsave(struct ubifs_info *c)
if (!dbg_is_chk_gen(c))
return 0;
- if (prandom_u32() & 3)
+ if (prandom_u32_max(4))
return 0;
for (i = 0; i < c->lsave_cnt; i++)
c->lsave[i] = c->main_first;
list_for_each_entry(lprops, &c->empty_list, list)
- c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
+ c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
list_for_each_entry(lprops, &c->freeable_list, list)
- c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
+ c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
list_for_each_entry(lprops, &c->frdi_idx_list, list)
- c->lsave[prandom_u32() % c->lsave_cnt] = lprops->lnum;
+ c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
for (i = 0; i < heap->cnt; i++)
- c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
+ c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
heap = &c->lpt_heap[LPROPS_DIRTY - 1];
for (i = 0; i < heap->cnt; i++)
- c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
+ c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
heap = &c->lpt_heap[LPROPS_FREE - 1];
for (i = 0; i < heap->cnt; i++)
- c->lsave[prandom_u32() % c->lsave_cnt] = heap->arr[i]->lnum;
+ c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
return 1;
}
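A recurring conversion in this merge: open-coded `prandom_u32() % n` and mask tricks like `prandom_u32() & 3` become `prandom_u32_max(n)`, here and in several hunks below. The helper states the intent directly and swaps an expensive division for a multiply-and-shift; for power-of-two bounds it is exactly equivalent, and for other bounds it carries the same slight bias the modulo had. At this point in the series the helper is, in sketch form:

	/* Map a full-range 32-bit random sample onto [0, ep_ro). */
	static inline u32 prandom_u32_max(u32 ep_ro)
	{
		return (u32)(((u64)get_random_u32() * ep_ro) >> 32);
	}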
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 58c92c96ecef..01362ad5f804 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -700,7 +700,7 @@ static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
c->ilebs[c->ileb_cnt++] = lnum;
dbg_cmt("LEB %d", lnum);
}
- if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
+ if (dbg_is_chk_index(c) && !prandom_u32_max(8))
return -ENOSPC;
return 0;
}
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 7d6d2f152e03..478bbbb5382f 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -2026,7 +2026,7 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time, int flags);
/* dir.c */
struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
- umode_t mode);
+ umode_t mode, bool is_xattr);
int ubifs_getattr(struct user_namespace *mnt_userns, const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
int ubifs_check_dir_empty(struct inode *dir);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index e4c4761aff7f..3db8486e3725 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -110,7 +110,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
if (err)
return err;
- inode = ubifs_new_inode(c, host, S_IFREG | S_IRWXUGO);
+ inode = ubifs_new_inode(c, host, S_IFREG | S_IRWXUGO, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out_budg;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index e2bdf089c0a3..6261599bb389 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1520,7 +1520,7 @@ xfs_alloc_ag_vextent_lastblock(
#ifdef DEBUG
/* Randomly don't execute the first algorithm. */
- if (prandom_u32() & 1)
+ if (prandom_u32_max(2))
return 0;
#endif
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 6cdfd64bc56b..94db50eb706a 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -636,7 +636,7 @@ xfs_ialloc_ag_alloc(
/* randomly do sparse inode allocations */
if (xfs_has_sparseinodes(tp->t_mountp) &&
igeo->ialloc_min_blks < igeo->ialloc_blks)
- do_sparse = prandom_u32() & 1;
+ do_sparse = prandom_u32_max(2);
#endif
/*
@@ -805,7 +805,7 @@ sparse_alloc:
* number from being easily guessable.
*/
error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, pag->pag_agno,
- args.agbno, args.len, prandom_u32());
+ args.agbno, args.len, get_random_u32());
if (error)
return error;
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 296faa41d81d..7db588ed0be5 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -274,7 +274,7 @@ xfs_errortag_test(
ASSERT(error_tag < XFS_ERRTAG_MAX);
randfactor = mp->m_errortag[error_tag];
- if (!randfactor || prandom_u32() % randfactor)
+ if (!randfactor || prandom_u32_max(randfactor))
return false;
xfs_warn_ratelimited(mp,
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 2bbe7916a998..eae7427062cf 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -596,7 +596,7 @@ xfs_iget_cache_miss(
*/
if (xfs_has_v3inodes(mp) &&
(flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
- VFS_I(ip)->i_generation = prandom_u32();
+ VFS_I(ip)->i_generation = get_random_u32();
} else {
struct xfs_buf *bp;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f6e7e4fd72ae..f02a0dd522b3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3544,7 +3544,7 @@ xlog_ticket_alloc(
tic->t_curr_res = unit_res;
tic->t_cnt = cnt;
tic->t_ocnt = cnt;
- tic->t_tid = prandom_u32();
+ tic->t_tid = get_random_u32();
if (permanent)
tic->t_flags |= XLOG_TIC_PERM_RESERV;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3e187a02924f..50e358a19d98 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -580,9 +580,9 @@ struct request_queue {
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
-#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_NOWAIT))
+#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
+ (1UL << QUEUE_FLAG_SAME_COMP) | \
+ (1UL << QUEUE_FLAG_NOWAIT))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
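The switch to `1UL` matters because these bits land in an unsigned long flags word. With plain `int` constants the expression is computed in 32-bit signed arithmetic, which goes wrong as soon as bit 31 is involved; the highest flag here is 30, so this is presumably defensive consistency rather than a live bug. A minimal illustration:

	/* int shift: undefined for bit 31, and sign-extends when widened */
	unsigned long bad  = 1 << 31;   /* 0xffffffff80000000 on 64-bit */
	unsigned long good = 1UL << 31; /* 0x0000000080000000, the intended bit */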
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8f481d1b159a..6e01f10f0d88 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -428,6 +428,9 @@ struct cgroup {
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
+ /* handles for "{cpu,memory,io,irq}.pressure" */
+ struct cgroup_file psi_files[NR_PSI_RESOURCES];
+
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 23b102b4349e..528bd44b59e2 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -106,6 +106,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -682,11 +683,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
- return cgrp->psi;
-}
-
bool cgroup_psi_enabled(void);
static inline void cgroup_init_kthreadd(void)
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2108b5695327..267cd06b54a0 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -42,6 +42,8 @@ struct dentry;
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
+ * Should be initialized by calling clk_hw_init_rate_request().
+ *
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
@@ -60,6 +62,15 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate);
+void clk_hw_forward_rate_request(const struct clk_hw *core,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate);
+
/**
* struct clk_duty - Structure encoding the duty cycle ratio of a clock
*
@@ -118,8 +129,9 @@ struct clk_duty {
*
* @recalc_rate Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
- * ensure that the prepare_mutex is held across this call.
- * Returns the calculated rate. Optional, but recommended - if
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+ * 0. Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
@@ -1303,6 +1315,8 @@ int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index c13061cabdfc..1ef013324237 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -799,7 +799,7 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent);
+bool clk_has_parent(const struct clk *clk, const struct clk *parent);
/**
* clk_set_rate_range - set a rate range for a clock source
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 3484309b59bf..7af499bdbecb 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -12,6 +12,8 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
+#include <linux/bits.h>
+
#define AT91_PMC_V1 (1) /* PMC version 1 */
#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */
@@ -45,8 +47,8 @@
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
-#define AT91_PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL /* Default PLL ACR value for UPLL */
-#define AT91_PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL /* Default PLL ACR value for PLLA */
+#define AT91_PMC_PLL_ACR_DEFAULT_UPLL UL(0x12020010) /* Default PLL ACR value for UPLL */
+#define AT91_PMC_PLL_ACR_DEFAULT_PLLA UL(0x00020010) /* Default PLL ACR value for PLLA */
#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
index a64d034ceddd..eaf95ca656f8 100644
--- a/include/linux/clk/spear.h
+++ b/include/linux/clk/spear.h
@@ -8,6 +8,20 @@
#ifndef __LINUX_CLK_SPEAR_H
#define __LINUX_CLK_SPEAR_H
+#ifdef CONFIG_ARCH_SPEAR3XX
+void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base);
+#else
+static inline void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base) {}
+#endif
+
+#ifdef CONFIG_ARCH_SPEAR6XX
+void __init spear6xx_clk_init(void __iomem *misc_base);
+#else
+static inline void __init spear6xx_clk_init(void __iomem *misc_base) {}
+#endif
+
#ifdef CONFIG_MACH_SPEAR1310
void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
#else
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 2f065ad97541..c2aa0aa26b45 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -174,8 +174,9 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
- /* n is a prior cpu */
- cpumask_check(n + 1);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
@@ -188,8 +189,9 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
*/
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
- /* n is a prior cpu */
- cpumask_check(n + 1);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
}
@@ -229,8 +231,9 @@ static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
const struct cpumask *src2p)
{
- /* n is a prior cpu */
- cpumask_check(n + 1);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits, n + 1);
}
@@ -260,8 +263,8 @@ static inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
- /* n is a prior cpu */
- cpumask_check(n + 1);
+ if (n != -1)
+ cpumask_check(n);
/*
* Return the first available CPU when wrapping, or when starting before cpu0,
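These relaxations keep -1 usable as the conventional "no prior CPU" starting point while still range-checking real CPU numbers. The idiom they preserve, roughly what for_each_cpu() expands to (handle_cpu() is illustrative):

	unsigned int cpu;

	for (cpu = cpumask_next(-1, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, mask))
		handle_cpu(cpu);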
diff --git a/include/linux/damon.h b/include/linux/damon.h
index ed5470f50bab..620ada094c3b 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -484,6 +484,12 @@ static inline struct damon_region *damon_first_region(struct damon_target *t)
return list_first_entry(&t->regions_list, struct damon_region, list);
}
+static inline unsigned long damon_sz_region(struct damon_region *r)
+{
+ return r->ar.end - r->ar.start;
+}
+
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
index 50be7cbd93a5..b1b5720d89a5 100644
--- a/include/linux/dsa/tag_qca.h
+++ b/include/linux/dsa/tag_qca.h
@@ -61,9 +61,9 @@ struct sk_buff;
/* Special struct emulating an Ethernet header */
struct qca_mgmt_ethhdr {
- u32 command; /* command bit 31:0 */
- u32 seq; /* seq 63:32 */
- u32 mdio_data; /* first 4byte mdio */
+ __le32 command; /* command bit 31:0 */
+ __le32 seq; /* seq 63:32 */
+ __le32 mdio_data; /* first 4byte mdio */
__be16 hdr; /* qca hdr */
} __packed;
@@ -73,7 +73,7 @@ enum mdio_cmd {
};
struct mib_ethhdr {
- u32 data[3]; /* first 3 mib counter */
+ __le32 data[3]; /* first 3 mib counter */
__be16 hdr; /* qca hdr */
} __packed;
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index aa4d90a53866..f5b687a787a3 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -34,9 +34,6 @@ struct io_file_table {
unsigned int alloc_hint;
};
-struct io_notif;
-struct io_notif_slot;
-
struct io_hash_bucket {
spinlock_t lock;
struct hlist_head list;
@@ -242,8 +239,6 @@ struct io_ring_ctx {
unsigned nr_user_files;
unsigned nr_user_bufs;
struct io_mapped_ubuf **user_bufs;
- struct io_notif_slot *notif_slots;
- unsigned nr_notif_slots;
struct io_submit_state submit_state;
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index c3b4cc84877b..7fcaf3180a5b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -187,6 +187,7 @@ static inline bool folio_is_device_coherent(const struct folio *folio)
}
#ifdef CONFIG_ZONE_DEVICE
+void zone_device_page_init(struct page *page);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 704a04f5a074..3ef77f52a4f0 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -62,6 +62,8 @@ extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
+int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
@@ -197,11 +199,24 @@ struct migrate_vma {
*/
void *pgmap_owner;
unsigned long flags;
+
+ /*
+ * Set to vmf->page if this is being called to migrate a page as part of
+ * a migrate_to_ram() callback.
+ */
+ struct page *fault_page;
};
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages);
+
#endif /* CONFIG_MIGRATION */
#endif /* _LINUX_MIGRATE_H */
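The new migrate_device_* entry points split the migrate_vma machinery into pfn-array stages, so a device driver can evict a physical range of its own device-private pages without walking a vma. A hedged sketch of the intended sequence (allocation and copy steps are driver-specific and only indicated; error handling elided):

	unsigned long src[16], dst[16];
	unsigned long start_pfn = first_device_pfn;	/* illustrative */

	migrate_device_range(src, start_pfn, 16);	/* isolate source pages */
	/* for each migrating src[i]: allocate a system page, copy the
	 * device data into it, record its pfn in dst[i] */
	migrate_device_pages(src, dst, 16);		/* install new pages */
	migrate_device_finalize(src, dst, 16);		/* unlock and release */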
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 8a30de08e913..c726ea781255 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -293,6 +293,7 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
bool reenable_cmdq; /* Re-enable Command Queue */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a36edb0ec199..eddf8ee270e7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3663,8 +3663,9 @@ static inline bool netif_attr_test_online(unsigned long j,
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
{
- /* n is a prior cpu */
- cpu_max_bits_warn(n + 1, nr_bits);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
if (srcp)
return find_next_bit(srcp, nr_bits, n + 1);
@@ -3685,8 +3686,9 @@ static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
unsigned int nr_bits)
{
- /* n is a prior cpu */
- cpu_max_bits_warn(n + 1, nr_bits);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
if (src1p && src2p)
return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 378956c93c94..efef68c9352a 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -516,7 +516,7 @@ static inline int node_random(const nodemask_t *maskp)
bit = first_node(*maskp);
break;
default:
- bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_int() % w);
+ bit = find_nth_bit(maskp->bits, MAX_NUMNODES, prandom_u32_max(w));
break;
}
return bit;
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index c29c3f174972..63800bf4a7ac 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -122,6 +122,7 @@ enum phylink_op_type {
* (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls")
* @poll_fixed_state: if true, starts link_poll,
* if MAC link is in %MLO_AN_FIXED mode.
+ * @mac_managed_pm: if true, indicates the MAC driver is responsible for PHY PM.
* @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is in %MLO_AN_FIXED mode.
@@ -134,6 +135,7 @@ struct phylink_config {
enum phylink_op_type type;
bool legacy_pre_march2020;
bool poll_fixed_state;
+ bool mac_managed_pm;
bool ovr_an_inband;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index 78db003bc290..e0a0759dd09c 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -12,18 +12,6 @@
#include <linux/percpu.h>
#include <linux/random.h>
-/* Deprecated: use get_random_u32 instead. */
-static inline u32 prandom_u32(void)
-{
- return get_random_u32();
-}
-
-/* Deprecated: use get_random_bytes instead. */
-static inline void prandom_bytes(void *buf, size_t nbytes)
-{
- return get_random_bytes(buf, nbytes);
-}
-
struct rnd_state {
__u32 s1, s2, s3, s4;
};
diff --git a/include/linux/psi.h b/include/linux/psi.h
index dd74411ac21d..b029a847def1 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/cgroup-defs.h>
+#include <linux/cgroup.h>
struct seq_file;
struct css_set;
@@ -18,10 +19,6 @@ extern struct psi_group psi_system;
void psi_init(void);
-void psi_task_change(struct task_struct *task, int clear, int set);
-void psi_task_switch(struct task_struct *prev, struct task_struct *next,
- bool sleep);
-
void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
@@ -34,9 +31,15 @@ __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
poll_table *wait);
#ifdef CONFIG_CGROUPS
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+}
+
int psi_cgroup_alloc(struct cgroup *cgrp);
void psi_cgroup_free(struct cgroup *cgrp);
void cgroup_move_task(struct task_struct *p, struct css_set *to);
+void psi_cgroup_restart(struct psi_group *group);
#endif
#else /* CONFIG_PSI */
@@ -58,6 +61,7 @@ static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
{
rcu_assign_pointer(p->cgroups, to);
}
+static inline void psi_cgroup_restart(struct psi_group *group) {}
#endif
#endif /* CONFIG_PSI */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index c7fe7c089718..6e4372735068 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -16,13 +16,6 @@ enum psi_task_count {
NR_MEMSTALL,
NR_RUNNING,
/*
- * This can't have values other than 0 or 1 and could be
- * implemented as a bit flag. But for now we still have room
- * in the first cacheline of psi_group_cpu, and this way we
- * don't have to special case any state tracking for it.
- */
- NR_ONCPU,
- /*
* For IO and CPU stalls the presence of running/oncpu tasks
* in the domain means a partial rather than a full stall.
* For memory it's not so simple because of page reclaimers:
@@ -32,22 +25,27 @@ enum psi_task_count {
* threads and memstall ones.
*/
NR_MEMSTALL_RUNNING,
- NR_PSI_TASK_COUNTS = 5,
+ NR_PSI_TASK_COUNTS = 4,
};
/* Task state bitmasks */
#define TSK_IOWAIT (1 << NR_IOWAIT)
#define TSK_MEMSTALL (1 << NR_MEMSTALL)
#define TSK_RUNNING (1 << NR_RUNNING)
-#define TSK_ONCPU (1 << NR_ONCPU)
#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING)
+/* Only one task can be scheduled, no corresponding task count */
+#define TSK_ONCPU (1 << NR_PSI_TASK_COUNTS)
+
/* Resources that workloads could be stalled on */
enum psi_res {
PSI_IO,
PSI_MEM,
PSI_CPU,
- NR_PSI_RESOURCES = 3,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ,
+#endif
+ NR_PSI_RESOURCES,
};
/*
@@ -63,11 +61,17 @@ enum psi_states {
PSI_MEM_FULL,
PSI_CPU_SOME,
PSI_CPU_FULL,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ_FULL,
+#endif
/* Only per-CPU, to weigh the CPU in the global average: */
PSI_NONIDLE,
- NR_PSI_STATES = 7,
+ NR_PSI_STATES,
};
+/* Use one bit in the state mask to track TSK_ONCPU */
+#define PSI_ONCPU (1 << NR_PSI_STATES)
+
enum psi_aggregators {
PSI_AVGS = 0,
PSI_POLL,
@@ -147,6 +151,9 @@ struct psi_trigger {
};
struct psi_group {
+ struct psi_group *parent;
+ bool enabled;
+
/* Protects data used by the aggregator */
struct mutex avgs_lock;
@@ -188,6 +195,8 @@ struct psi_group {
#else /* CONFIG_PSI */
+#define NR_PSI_RESOURCES 0
+
struct psi_group { };
#endif /* CONFIG_PSI */
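Dropping NR_ONCPU from the task counts works because at most one task can be on a CPU at a time: a single PSI_ONCPU bit in the per-CPU state mask carries the same information and frees a slot in tasks[]. A sketch in the spirit of the scheduler-side hunks later in this merge (names abbreviated):

	/* context switch: flip one state-mask bit, no counter updates */
	prev_groupc->state_mask &= ~PSI_ONCPU;
	next_groupc->state_mask |= PSI_ONCPU;

	/* CPU pressure then tests the bit instead of tasks[NR_ONCPU]: */
	bool oncpu = state_mask & PSI_ONCPU;
	some = tasks[NR_RUNNING] > oncpu;
	full = tasks[NR_RUNNING] && !oncpu;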
diff --git a/include/linux/random.h b/include/linux/random.h
index 08322f700cdc..147a5e0d0b8e 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -42,10 +42,6 @@ u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
-static inline unsigned int get_random_int(void)
-{
- return get_random_u32();
-}
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
@@ -100,7 +96,6 @@ declare_get_random_var_wait(u8, u8)
declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u32)
-declare_get_random_var_wait(int, unsigned int)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f68f8b795c..ffb6eb55cd13 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -870,8 +870,6 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
- /* Per-thread vma caching: */
-
#ifdef SPLIT_RSS_COUNTING
struct task_rss_stat rss_stat;
#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9fcf534f2d92..7be5bb4c94b6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -803,6 +803,7 @@ typedef unsigned char *sk_buff_data_t;
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @scm_io_uring: SKB holds io_uring registered files
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
@@ -982,6 +983,7 @@ struct sk_buff {
#endif
__u8 slow_gro:1;
__u8 csum_not_inet:1;
+ __u8 scm_io_uring:1;
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index e24c9aff6fed..f0ffad6a3365 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -33,7 +33,6 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- struct kmem_cache *freelist_cache;
unsigned int freelist_size;
/* constructor func */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8f780170e2f8..3d08e67b3cfc 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -37,6 +37,7 @@ struct genl_info;
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @module: pointer to the owning module (set to THIS_MODULE)
* @mcgrps: multicast groups used by this family
* @n_mcgrps: number of multicast groups
* @resv_start_op: first operation for which reserved fields of the header
@@ -173,9 +174,9 @@ struct genl_ops {
};
/**
- * struct genl_info - info that is available during dumpit op call
+ * struct genl_dumpit_info - info that is available during dumpit op call
* @family: generic netlink family - for internal genl code usage
- * @ops: generic netlink ops - for internal genl code usage
+ * @op: generic netlink ops - for internal genl code usage
* @attrs: netlink attributes
*/
struct genl_dumpit_info {
@@ -354,6 +355,7 @@ int genlmsg_multicast_allns(const struct genl_family *family,
/**
* genlmsg_unicast - unicast a netlink message
+ * @net: network namespace to look up @portid in
* @skb: netlink message as socket buffer
* @portid: netlink portid of the destination socket
*/
@@ -373,7 +375,7 @@ static inline int genlmsg_reply(struct sk_buff *skb, struct genl_info *info)
}
/**
- * gennlmsg_data - head of message payload
+ * genlmsg_data - head of message payload
* @gnlh: genetlink message header
*/
static inline void *genlmsg_data(const struct genlmsghdr *gnlh)
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 980daa6e1e3a..c81021ab07aa 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -43,7 +43,7 @@ void nf_queue_entry_free(struct nf_queue_entry *entry);
static inline void init_hashrandom(u32 *jhash_initval)
{
while (*jhash_initval == 0)
- *jhash_initval = prandom_u32();
+ *jhash_initval = get_random_u32();
}
static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
diff --git a/include/net/red.h b/include/net/red.h
index 454ac2b65d8c..425364de0df7 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -363,7 +363,7 @@ static inline unsigned long red_calc_qavg(const struct red_parms *p,
static inline u32 red_random(const struct red_parms *p)
{
- return reciprocal_divide(prandom_u32(), p->max_P_reciprocal);
+ return reciprocal_divide(get_random_u32(), p->max_P_reciprocal);
}
static inline int red_mark_probability(const struct red_parms *p,
diff --git a/include/net/sock.h b/include/net/sock.h
index 08038a385ef2..9e464f6409a7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2109,7 +2109,7 @@ static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
static inline u32 net_tx_rndhash(void)
{
- u32 v = prandom_u32();
+ u32 v = get_random_u32();
return v ?: 1;
}
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 473b0b0fa4ab..efc9085c6892 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -43,21 +43,20 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
-static inline bool reuseport_has_conns(struct sock *sk, bool set)
+static inline bool reuseport_has_conns(struct sock *sk)
{
struct sock_reuseport *reuse;
bool ret = false;
rcu_read_lock();
reuse = rcu_dereference(sk->sk_reuseport_cb);
- if (reuse) {
- if (set)
- reuse->has_conns = 1;
- ret = reuse->has_conns;
- }
+ if (reuse && reuse->has_conns)
+ ret = true;
rcu_read_unlock();
return ret;
}
+void reuseport_has_conns_set(struct sock *sk);
+
#endif /* _SOCK_REUSEPORT_H */
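Separating the read side from the write side lets the setter take a real lock: the old reuseport_has_conns(sk, true) flipped has_conns under nothing but rcu_read_lock(), so the flag could be lost when the sock_reuseport group was concurrently reallocated (e.g. by reuseport_grow()). A plausible shape for the new out-of-line setter, which presumably lives in net/core/sock_reuseport.c:

	void reuseport_has_conns_set(struct sock *sk)
	{
		struct sock_reuseport *reuse;

		if (!rcu_access_pointer(sk->sk_reuseport_cb))
			return;

		spin_lock_bh(&reuseport_lock);
		reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
						  lockdep_is_held(&reuseport_lock));
		if (reuse)
			reuse->has_conns = 1;
		spin_unlock_bh(&reuseport_lock);
	}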
diff --git a/include/soc/sifive/sifive_ccache.h b/include/soc/sifive/sifive_ccache.h
new file mode 100644
index 000000000000..4d4ed49388a0
--- /dev/null
+++ b/include/soc/sifive/sifive_ccache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive Composable Cache Controller header file
+ *
+ */
+
+#ifndef __SOC_SIFIVE_CCACHE_H
+#define __SOC_SIFIVE_CCACHE_H
+
+extern int register_sifive_ccache_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_ccache_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_CCACHE_ERR_TYPE_CE 0
+#define SIFIVE_CCACHE_ERR_TYPE_UE 1
+
+#endif /* __SOC_SIFIVE_CCACHE_H */
diff --git a/include/soc/sifive/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h
deleted file mode 100644
index 92ade10ed67e..000000000000
--- a/include/soc/sifive/sifive_l2_cache.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SiFive L2 Cache Controller header file
- *
- */
-
-#ifndef __SOC_SIFIVE_L2_CACHE_H
-#define __SOC_SIFIVE_L2_CACHE_H
-
-extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
-extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
-
-#define SIFIVE_L2_ERR_TYPE_CE 0
-#define SIFIVE_L2_ERR_TYPE_UE 1
-
-#endif /* __SOC_SIFIVE_L2_CACHE_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index ddff03e546e9..35778f953a3f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -592,11 +592,11 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
#define snd_hdac_stream_readb(dev, reg) \
snd_hdac_reg_readb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readb_poll(dev, reg, val, cond, delay_us, timeout_us) \
- readb_poll_timeout((dev)->sd_addr + AZX_REG_ ## reg, val, cond, \
- delay_us, timeout_us)
+ read_poll_timeout_atomic(snd_hdac_reg_readb, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readl_poll(dev, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout((dev)->sd_addr + AZX_REG_ ## reg, val, cond, \
- delay_us, timeout_us)
+ read_poll_timeout_atomic(snd_hdac_reg_readl, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
/* update a register, pass without AZX_REG_ prefix */
#define snd_hdac_stream_updatel(dev, reg, mask, val) \
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index b69e9ba6742b..dcb179de4358 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -247,6 +247,7 @@ enum {
* @vid_hdr_offset: VID header offset (use defaults if %0)
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @padding: reserved for future, not used, has to be zeroed
+ * @disable_fm: whether to disable fastmap
*
* This data structure is used to specify MTD device UBI has to attach and the
* parameters it has to use. The number which should be assigned to the new UBI
@@ -281,13 +282,18 @@ enum {
* eraseblocks for new bad eraseblocks, but attempts to use available
* eraseblocks (if any). The accepted range is 0-768. If 0 is given, the
* default kernel value of %CONFIG_MTD_UBI_BEB_LIMIT will be used.
+ *
+ * If @disable_fm is not zero, UBI doesn't create a new fastmap even if the
+ * module parameter 'fm_autoconvert' is set, and an existing fastmap will be
+ * destroyed after a full scan.
*/
struct ubi_attach_req {
__s32 ubi_num;
__s32 mtd_num;
__s32 vid_hdr_offset;
__s16 max_beb_per1024;
- __s8 padding[10];
+ __s8 disable_fm;
+ __s8 padding[9];
};
/*
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index 4eae088046d0..2e04850a657b 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -94,7 +94,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
if (sq_idx > sq_mask)
continue;
- sqe = &ctx->sq_sqes[sq_idx << 1];
+ sqe = &ctx->sq_sqes[sq_idx << sq_shift];
seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
"addr:0x%llx, rw_flags:0x%x, buf_index:%d "
"user_data:%llu",
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 99a52f34b7d3..de08d9902b30 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1106,6 +1106,8 @@ static void io_req_local_work_add(struct io_kiocb *req)
if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
return;
+ /* need it for the following io_cqring_wake() */
+ smp_mb__after_atomic();
if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
io_move_task_work_from_local(ctx);
@@ -1117,8 +1119,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
if (ctx->has_evfd)
io_eventfd_signal(ctx);
- io_cqring_wake(ctx);
-
+ __io_cqring_wake(ctx);
}
static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
@@ -2585,12 +2586,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
io_sq_thread_finish(ctx);
-
- if (ctx->mm_account) {
- mmdrop(ctx->mm_account);
- ctx->mm_account = NULL;
- }
-
io_rsrc_refs_drop(ctx);
/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
io_wait_rsrc_data(ctx->buf_data);
@@ -2631,8 +2626,11 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
}
#endif
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
- WARN_ON_ONCE(ctx->notif_slots || ctx->nr_notif_slots);
+ if (ctx->mm_account) {
+ mmdrop(ctx->mm_account);
+ ctx->mm_account = NULL;
+ }
io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
@@ -3229,8 +3227,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
mutex_unlock(&ctx->uring_lock);
goto out;
}
- if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
- goto iopoll_locked;
+ if (flags & IORING_ENTER_GETEVENTS) {
+ if (ctx->syscall_iopoll)
+ goto iopoll_locked;
+ /*
+ * Ignore errors, we'll soon call io_cqring_wait() and
+ * it should handle ownership problems if any.
+ */
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ (void)io_run_local_work_locked(ctx);
+ }
mutex_unlock(&ctx->uring_lock);
}
@@ -3355,7 +3361,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
if (fd < 0)
return fd;
- ret = __io_uring_add_tctx_node(ctx, false);
+ ret = __io_uring_add_tctx_node(ctx);
if (ret) {
put_unused_fd(fd);
return ret;
@@ -3890,6 +3896,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
return -ENXIO;
+ if (ctx->submitter_task && ctx->submitter_task != current)
+ return -EEXIST;
+
if (ctx->restricted) {
if (opcode >= IORING_REGISTER_LAST)
return -EINVAL;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 48ce2348c8c1..ef77d2aa3172 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -203,17 +203,24 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
-static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+/* requires smp_mb() prior, see wq_has_sleeper() */
+static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
/*
* wake_up_all() may seem excessive, but io_wake_function() and
* io_should_wake() handle the termination of the loop and only
* wake as many waiters as we need to.
*/
- if (wq_has_sleeper(&ctx->cq_wait))
+ if (waitqueue_active(&ctx->cq_wait))
wake_up_all(&ctx->cq_wait);
}
+static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+{
+ smp_mb();
+ __io_cqring_wake(ctx);
+}
+
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
struct io_rings *r = ctx->rings;
@@ -268,6 +275,13 @@ static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
return ret;
}
+static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
+{
+ if (llist_empty(&ctx->work_llist))
+ return 0;
+ return __io_run_local_work(ctx, true);
+}
+
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
if (!*locked) {
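The __io_cqring_wake()/io_cqring_wake() split makes the ordering contract explicit: a bare waitqueue_active() check is only safe after a full barrier that pairs with the one implied by the sleeper's prepare_to_wait()/set_current_state(). In io_req_local_work_add() above, the smp_mb__after_atomic() that follows llist_add() provides it, so the cheaper __io_cqring_wake() suffices there. Schematically:

	/*
	 * waker                                 sleeper
	 * -----                                 -------
	 * llist_add(&req->..., &work_llist);    prepare_to_wait(&ctx->cq_wait);
	 * smp_mb__after_atomic();                  ...implies a full barrier...
	 * if (waitqueue_active(&ctx->cq_wait))  if (!io_should_wake(...))
	 *         wake_up_all(&ctx->cq_wait);           schedule();
	 */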
diff --git a/io_uring/net.c b/io_uring/net.c
index caa6a803cb72..8c7226b5bf41 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -46,6 +46,7 @@ struct io_connect {
struct file *file;
struct sockaddr __user *addr;
int addr_len;
+ bool in_progress;
};
struct io_sr_msg {
@@ -1386,6 +1387,7 @@ int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
conn->addr_len = READ_ONCE(sqe->addr2);
+ conn->in_progress = false;
return 0;
}
@@ -1397,6 +1399,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ if (connect->in_progress) {
+ struct socket *socket;
+
+ ret = -ENOTSOCK;
+ socket = sock_from_file(req->file);
+ if (socket)
+ ret = sock_error(socket->sk);
+ goto out;
+ }
+
if (req_has_async_data(req)) {
io = req->async_data;
} else {
@@ -1413,13 +1425,17 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
ret = __sys_connect_file(req->file, &io->address,
connect->addr_len, file_flags);
if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
- if (req_has_async_data(req))
- return -EAGAIN;
- if (io_alloc_async_data(req)) {
- ret = -ENOMEM;
- goto out;
+ if (ret == -EINPROGRESS) {
+ connect->in_progress = true;
+ } else {
+ if (req_has_async_data(req))
+ return -EAGAIN;
+ if (io_alloc_async_data(req)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(req->async_data, &__io, sizeof(__io));
}
- memcpy(req->async_data, &__io, sizeof(__io));
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
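The in_progress branch encodes the standard non-blocking connect protocol: once connect() has returned -EINPROGRESS the handshake is underway, and issuing another connect() is wrong; the caller should instead wait for writability and collect the outcome from SO_ERROR, which is exactly what sock_error() reads. The userspace analogue, for comparison:

	#include <poll.h>
	#include <sys/socket.h>

	static int finish_connect(int fd)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
		int err = 0;
		socklen_t len = sizeof(err);

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
			return -1;
		return err;	/* 0 on success, else the pending errno */
	}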
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 2330f6da791e..83dc0f9ad3b2 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -510,7 +510,6 @@ const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
- .audit_skip = 1,
.ioprio = 1,
.manual_alloc = 1,
#if defined(CONFIG_NET)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 6f88ded0e7e5..012fdb04ec23 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -855,6 +855,7 @@ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
UNIXCB(skb).fp = fpl;
skb->sk = sk;
+ skb->scm_io_uring = 1;
skb->destructor = unix_destruct_scm;
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
}
diff --git a/io_uring/rw.c b/io_uring/rw.c
index a25cd44cd415..100de2626e47 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -234,11 +234,34 @@ static void kiocb_end_write(struct io_kiocb *req)
}
}
+/*
+ * Trigger the notifications after having done some IO, and finish the write
+ * accounting, if any.
+ */
+static void io_req_io_end(struct io_kiocb *req)
+{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+ WARN_ON(!in_task());
+
+ if (rw->kiocb.ki_flags & IOCB_WRITE) {
+ kiocb_end_write(req);
+ fsnotify_modify(req->file);
+ } else {
+ fsnotify_access(req->file);
+ }
+}
+
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
if (unlikely(res != req->cqe.res)) {
if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
io_rw_should_reissue(req)) {
+ /*
+ * Reissue will start accounting again, finish the
+ * current cycle.
+ */
+ io_req_io_end(req);
req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
return true;
}
@@ -264,15 +287,7 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
{
- struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
- if (rw->kiocb.ki_flags & IOCB_WRITE) {
- kiocb_end_write(req);
- fsnotify_modify(req->file);
- } else {
- fsnotify_access(req->file);
- }
-
+ io_req_io_end(req);
io_req_task_complete(req, locked);
}
@@ -317,6 +332,11 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
req->file->f_pos = rw->kiocb.ki_pos;
if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
if (!__io_complete_rw_common(req, ret)) {
+ /*
+ * Safe to call io_end from here as we're inline
+ * from the submission path.
+ */
+ io_req_io_end(req);
io_req_set_res(req, final_ret,
io_put_kbuf(req, issue_flags));
return IOU_OK;
@@ -916,7 +936,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
goto copy_iov;
if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
- struct io_async_rw *rw;
+ struct io_async_rw *io;
trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
req->cqe.res, ret2);
@@ -929,9 +949,9 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
iov_iter_save_state(&s->iter, &s->iter_state);
ret = io_setup_async_rw(req, iovec, s, true);
- rw = req->async_data;
- if (rw)
- rw->bytes_done += ret2;
+ io = req->async_data;
+ if (io)
+ io->bytes_done += ret2;
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 7f97d97fef0a..4324b1cf1f6a 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -91,32 +91,12 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
return 0;
}
-static int io_register_submitter(struct io_ring_ctx *ctx)
-{
- int ret = 0;
-
- mutex_lock(&ctx->uring_lock);
- if (!ctx->submitter_task)
- ctx->submitter_task = get_task_struct(current);
- else if (ctx->submitter_task != current)
- ret = -EEXIST;
- mutex_unlock(&ctx->uring_lock);
-
- return ret;
-}
-
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter)
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
struct io_uring_task *tctx = current->io_uring;
struct io_tctx_node *node;
int ret;
- if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) && submitter) {
- ret = io_register_submitter(ctx);
- if (ret)
- return ret;
- }
-
if (unlikely(!tctx)) {
ret = io_uring_alloc_task_context(current, ctx);
if (unlikely(ret))
@@ -150,8 +130,22 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter)
list_add(&node->ctx_node, &ctx->tctx_list);
mutex_unlock(&ctx->uring_lock);
}
- if (submitter)
- tctx->last = ctx;
+ return 0;
+}
+
+int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
+{
+ int ret;
+
+ if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
+ && ctx->submitter_task != current)
+ return -EEXIST;
+
+ ret = __io_uring_add_tctx_node(ctx);
+ if (ret)
+ return ret;
+
+ current->io_uring->last = ctx;
return 0;
}
@@ -259,7 +253,7 @@ int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
return -EINVAL;
mutex_unlock(&ctx->uring_lock);
- ret = __io_uring_add_tctx_node(ctx, false);
+ ret = __io_uring_add_tctx_node(ctx);
mutex_lock(&ctx->uring_lock);
if (ret)
return ret;
diff --git a/io_uring/tctx.h b/io_uring/tctx.h
index 25974beed4d6..608e96de70a2 100644
--- a/io_uring/tctx.h
+++ b/io_uring/tctx.h
@@ -9,7 +9,8 @@ struct io_tctx_node {
int io_uring_alloc_task_context(struct task_struct *task,
struct io_ring_ctx *ctx);
void io_uring_del_tctx_node(unsigned long index);
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter);
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx);
+int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx);
void io_uring_clean_tctx(struct io_uring_task *tctx);
void io_uring_unreg_ringfd(void);
@@ -27,5 +28,6 @@ static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
if (likely(tctx && tctx->last == ctx))
return 0;
- return __io_uring_add_tctx_node(ctx, true);
+
+ return __io_uring_add_tctx_node_from_submit(ctx);
}
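With io_register_submitter() gone, the IORING_SETUP_SINGLE_ISSUER rule is enforced by plain task comparisons at the two points that can act on a ring, presumably safe without uring_lock because ctx->submitter_task is now fixed at ring creation. The two guards, as introduced by the hunks above:

	/* io_uring_register() path: */
	if (ctx->submitter_task && ctx->submitter_task != current)
		return -EEXIST;

	/* submission path, only for SINGLE_ISSUER rings: */
	if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) &&
	    ctx->submitter_task != current)
		return -EEXIST;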
diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c
index b9ea539a5561..48ee750849f2 100644
--- a/kernel/bpf/bloom_filter.c
+++ b/kernel/bpf/bloom_filter.c
@@ -158,7 +158,7 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
attr->value_size / sizeof(u32);
if (!(attr->map_flags & BPF_F_ZERO_SEED))
- bloom->hash_seed = get_random_int();
+ bloom->hash_seed = get_random_u32();
return &bloom->map;
}
diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c
index 0d200a993489..9fcf09f2ef00 100644
--- a/kernel/bpf/cgroup_iter.c
+++ b/kernel/bpf/cgroup_iter.c
@@ -196,7 +196,7 @@ static int bpf_iter_attach_cgroup(struct bpf_prog *prog,
return -EINVAL;
if (fd)
- cgrp = cgroup_get_from_fd(fd);
+ cgrp = cgroup_v1v2_get_from_fd(fd);
else if (id)
cgrp = cgroup_get_from_id(id);
else /* walk the entire hierarchy by default. */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index a0e762a2bf97..9c16338bcbe8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1032,7 +1032,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
hdr->size = size;
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
PAGE_SIZE - sizeof(*hdr));
- start = (get_random_int() % hole) & ~(alignment - 1);
+ start = prandom_u32_max(hole) & ~(alignment - 1);
/* Leave a random number of instructions before BPF code. */
*image_ptr = &hdr->image[start];
@@ -1094,7 +1094,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
- start = (get_random_int() % hole) & ~(alignment - 1);
+ start = prandom_u32_max(hole) & ~(alignment - 1);
*image_ptr = &ro_header->image[start];
*rw_image = &(*rw_header)->image[start];
@@ -1216,7 +1216,7 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
bool emit_zext)
{
struct bpf_insn *to = to_buff;
- u32 imm_rnd = get_random_int();
+ u32 imm_rnd = get_random_u32();
s16 off;
BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index ed3f8a53603b..f39ee3e05589 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -527,7 +527,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (htab->map.map_flags & BPF_F_ZERO_SEED)
htab->hashrnd = 0;
else
- htab->hashrnd = get_random_int();
+ htab->hashrnd = get_random_u32();
htab_init_buckets(htab);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6f6d2d511c06..014ee0953dbd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13350,7 +13350,7 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
aux[adj_idx].ptr_type == PTR_TO_CTX)
continue;
- imm_rnd = get_random_int();
+ imm_rnd = get_random_u32();
rnd_hi32_patch[0] = insn;
rnd_hi32_patch[1].imm = imm_rnd;
rnd_hi32_patch[3].dst_reg = load_reg;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 764bdd5fd8d1..2319946715e0 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1392,6 +1392,9 @@ static void cgroup_destroy_root(struct cgroup_root *root)
cgroup_free_root(root);
}
+/*
+ * The returned cgroup is not refcounted, but it remains valid as long as the cset pins it.
+ */
static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
struct cgroup_root *root)
{
@@ -1403,6 +1406,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
res_cgroup = cset->dfl_cgrp;
} else {
struct cgrp_cset_link *link;
+ lockdep_assert_held(&css_set_lock);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
@@ -1414,6 +1418,7 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
}
}
+ BUG_ON(!res_cgroup);
return res_cgroup;
}
@@ -1436,23 +1441,36 @@ current_cgns_cgroup_from_root(struct cgroup_root *root)
rcu_read_unlock();
- BUG_ON(!res);
return res;
}
+/*
+ * Look up cgroup associated with current task's cgroup namespace on the default
+ * hierarchy.
+ *
+ * Unlike current_cgns_cgroup_from_root(), this doesn't need locks:
+ * - Internal rcu_read_lock is unnecessary because we don't dereference any rcu
+ * pointers.
+ * - css_set_lock is not needed because we just read cset->dfl_cgrp.
+ * - As a bonus, the returned cgrp is pinned by current because a task cannot
+ *   switch its cgroup_ns asynchronously.
+ */
+static struct cgroup *current_cgns_cgroup_dfl(void)
+{
+ struct css_set *cset;
+
+ cset = current->nsproxy->cgroup_ns->root_cset;
+ return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
+}
+
/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
struct cgroup_root *root)
{
- struct cgroup *res = NULL;
-
lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock);
- res = __cset_cgroup_from_root(cset, root);
-
- BUG_ON(!res);
- return res;
+ return __cset_cgroup_from_root(cset, root);
}
/*
@@ -3698,27 +3716,27 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+ struct psi_group *psi = cgroup_psi(cgrp);
return psi_show(seq, psi, PSI_IO);
}
static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+ struct psi_group *psi = cgroup_psi(cgrp);
return psi_show(seq, psi, PSI_MEM);
}
static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
- struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+ struct psi_group *psi = cgroup_psi(cgrp);
return psi_show(seq, psi, PSI_CPU);
}
-static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
- size_t nbytes, enum psi_res res)
+static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, enum psi_res res)
{
struct cgroup_file_ctx *ctx = of->priv;
struct psi_trigger *new;
@@ -3738,7 +3756,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
return -EBUSY;
}
- psi = cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+ psi = cgroup_psi(cgrp);
new = psi_trigger_create(psi, buf, res);
if (IS_ERR(new)) {
cgroup_put(cgrp);
@@ -3755,21 +3773,86 @@ static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
- return cgroup_pressure_write(of, buf, nbytes, PSI_IO);
+ return pressure_write(of, buf, nbytes, PSI_IO);
}
static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
- return cgroup_pressure_write(of, buf, nbytes, PSI_MEM);
+ return pressure_write(of, buf, nbytes, PSI_MEM);
}
static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
char *buf, size_t nbytes,
loff_t off)
{
- return cgroup_pressure_write(of, buf, nbytes, PSI_CPU);
+ return pressure_write(of, buf, nbytes, PSI_CPU);
+}
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+static int cgroup_irq_pressure_show(struct seq_file *seq, void *v)
+{
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct psi_group *psi = cgroup_psi(cgrp);
+
+ return psi_show(seq, psi, PSI_IRQ);
+}
+
+static ssize_t cgroup_irq_pressure_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off)
+{
+ return pressure_write(of, buf, nbytes, PSI_IRQ);
+}
+#endif
+
+static int cgroup_pressure_show(struct seq_file *seq, void *v)
+{
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct psi_group *psi = cgroup_psi(cgrp);
+
+ seq_printf(seq, "%d\n", psi->enabled);
+
+ return 0;
+}
+
+static ssize_t cgroup_pressure_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off)
+{
+ ssize_t ret;
+ int enable;
+ struct cgroup *cgrp;
+ struct psi_group *psi;
+
+ ret = kstrtoint(strstrip(buf), 0, &enable);
+ if (ret)
+ return ret;
+
+ if (enable < 0 || enable > 1)
+ return -ERANGE;
+
+ cgrp = cgroup_kn_lock_live(of->kn, false);
+ if (!cgrp)
+ return -ENOENT;
+
+ psi = cgroup_psi(cgrp);
+ if (psi->enabled != enable) {
+ int i;
+
+ /* show or hide {cpu,memory,io,irq}.pressure files */
+ for (i = 0; i < NR_PSI_RESOURCES; i++)
+ cgroup_file_show(&cgrp->psi_files[i], enable);
+
+ psi->enabled = enable;
+ if (enable)
+ psi_cgroup_restart(psi);
+ }
+
+ cgroup_kn_unlock(of->kn);
+
+ return nbytes;
}
static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
@@ -3789,6 +3872,9 @@ static void cgroup_pressure_release(struct kernfs_open_file *of)
bool cgroup_psi_enabled(void)
{
+ if (static_branch_likely(&psi_disabled))
+ return false;
+
return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0;
}
@@ -5175,6 +5261,7 @@ static struct cftype cgroup_psi_files[] = {
#ifdef CONFIG_PSI
{
.name = "io.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IO]),
.seq_show = cgroup_io_pressure_show,
.write = cgroup_io_pressure_write,
.poll = cgroup_pressure_poll,
@@ -5182,6 +5269,7 @@ static struct cftype cgroup_psi_files[] = {
},
{
.name = "memory.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_MEM]),
.seq_show = cgroup_memory_pressure_show,
.write = cgroup_memory_pressure_write,
.poll = cgroup_pressure_poll,
@@ -5189,11 +5277,27 @@ static struct cftype cgroup_psi_files[] = {
},
{
.name = "cpu.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_CPU]),
.seq_show = cgroup_cpu_pressure_show,
.write = cgroup_cpu_pressure_write,
.poll = cgroup_pressure_poll,
.release = cgroup_pressure_release,
},
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ {
+ .name = "irq.pressure",
+ .file_offset = offsetof(struct cgroup, psi_files[PSI_IRQ]),
+ .seq_show = cgroup_irq_pressure_show,
+ .write = cgroup_irq_pressure_write,
+ .poll = cgroup_pressure_poll,
+ .release = cgroup_pressure_release,
+ },
+#endif
+ {
+ .name = "cgroup.pressure",
+ .seq_show = cgroup_pressure_show,
+ .write = cgroup_pressure_write,
+ },
#endif /* CONFIG_PSI */
{ } /* terminate */
};
@@ -6105,9 +6209,7 @@ struct cgroup *cgroup_get_from_id(u64 id)
if (!cgrp)
return ERR_PTR(-ENOENT);
- spin_lock_irq(&css_set_lock);
- root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root);
- spin_unlock_irq(&css_set_lock);
+ root_cgrp = current_cgns_cgroup_dfl();
if (!cgroup_is_descendant(cgrp, root_cgrp)) {
cgroup_put(cgrp);
return ERR_PTR(-ENOENT);
@@ -6208,16 +6310,42 @@ void cgroup_fork(struct task_struct *child)
INIT_LIST_HEAD(&child->cg_list);
}
-static struct cgroup *cgroup_get_from_file(struct file *f)
+/**
+ * cgroup_v1v2_get_from_file - get a cgroup pointer from a file pointer
+ * @f: file corresponding to cgroup_dir
+ *
+ * Find the cgroup from a file pointer associated with a cgroup directory.
+ * Returns a pointer to the cgroup on success. ERR_PTR is returned if the
+ * cgroup cannot be found.
+ */
+static struct cgroup *cgroup_v1v2_get_from_file(struct file *f)
{
struct cgroup_subsys_state *css;
- struct cgroup *cgrp;
css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
if (IS_ERR(css))
return ERR_CAST(css);
- cgrp = css->cgroup;
+ return css->cgroup;
+}
+
+/**
+ * cgroup_get_from_file - same as cgroup_v1v2_get_from_file, but only supports
+ * cgroup2.
+ * @f: file corresponding to cgroup2_dir
+ */
+static struct cgroup *cgroup_get_from_file(struct file *f)
+{
+ struct cgroup *cgrp = cgroup_v1v2_get_from_file(f);
+
+ if (IS_ERR(cgrp))
+ return ERR_CAST(cgrp);
+
+ if (!cgroup_on_dfl(cgrp)) {
+ cgroup_put(cgrp);
+ return ERR_PTR(-EBADF);
+ }
+
return cgrp;
}
@@ -6686,10 +6814,8 @@ struct cgroup *cgroup_get_from_path(const char *path)
struct cgroup *cgrp = ERR_PTR(-ENOENT);
struct cgroup *root_cgrp;
- spin_lock_irq(&css_set_lock);
- root_cgrp = current_cgns_cgroup_from_root(&cgrp_dfl_root);
+ root_cgrp = current_cgns_cgroup_dfl();
kn = kernfs_walk_and_get(root_cgrp->kn, path);
- spin_unlock_irq(&css_set_lock);
if (!kn)
goto out;
@@ -6714,15 +6840,15 @@ out:
EXPORT_SYMBOL_GPL(cgroup_get_from_path);
/**
- * cgroup_get_from_fd - get a cgroup pointer from a fd
- * @fd: fd obtained by open(cgroup2_dir)
+ * cgroup_v1v2_get_from_fd - get a cgroup pointer from a fd
+ * @fd: fd obtained by open(cgroup_dir)
*
* Find the cgroup from a fd which should be obtained
* by opening a cgroup directory. Returns a pointer to the
* cgroup on success. ERR_PTR is returned if the cgroup
* cannot be found.
*/
-struct cgroup *cgroup_get_from_fd(int fd)
+struct cgroup *cgroup_v1v2_get_from_fd(int fd)
{
struct cgroup *cgrp;
struct file *f;
@@ -6731,10 +6857,29 @@ struct cgroup *cgroup_get_from_fd(int fd)
if (!f)
return ERR_PTR(-EBADF);
- cgrp = cgroup_get_from_file(f);
+ cgrp = cgroup_v1v2_get_from_file(f);
fput(f);
return cgrp;
}
+
+/**
+ * cgroup_get_from_fd - same as cgroup_v1v2_get_from_fd, but only supports
+ * cgroup2.
+ * @fd: fd obtained by open(cgroup2_dir)
+ */
+struct cgroup *cgroup_get_from_fd(int fd)
+{
+ struct cgroup *cgrp = cgroup_v1v2_get_from_fd(fd);
+
+ if (IS_ERR(cgrp))
+ return ERR_CAST(cgrp);
+
+ if (!cgroup_on_dfl(cgrp)) {
+ cgroup_put(cgrp);
+ return ERR_PTR(-EBADF);
+ }
+ return cgrp;
+}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
static u64 power_of_ten(int power)
diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c
index 75712959c84e..00cdf8fa5693 100644
--- a/kernel/kcsan/selftest.c
+++ b/kernel/kcsan/selftest.c
@@ -26,7 +26,7 @@
static bool __init test_requires(void)
{
/* random should be initialized for the below tests */
- return prandom_u32() + prandom_u32() != 0;
+ return get_random_u32() + get_random_u32() != 0;
}
/*
@@ -46,7 +46,7 @@ static bool __init test_encode_decode(void)
unsigned long addr;
size_t verif_size;
- prandom_bytes(&addr, sizeof(addr));
+ get_random_bytes(&addr, sizeof(addr));
if (addr < PAGE_SIZE)
addr = PAGE_SIZE;
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 353004155d65..43efb2a04160 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -399,7 +399,7 @@ static int *get_random_order(int count)
order[n] = n;
for (n = count - 1; n > 1; n--) {
- r = get_random_int() % (n + 1);
+ r = prandom_u32_max(n + 1);
if (r != n) {
tmp = order[n];
order[n] = order[r];
@@ -538,7 +538,7 @@ static void stress_one_work(struct work_struct *work)
{
struct stress *stress = container_of(work, typeof(*stress), work);
const int nlocks = stress->nlocks;
- struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
+ struct ww_mutex *lock = stress->locks + prandom_u32_max(nlocks);
int err;
do {
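
The get_random_order() loop above is a Fisher-Yates shuffle; the same idiom
with the bounded helper, as a sketch assuming kernel context (swap() and
prandom_u32_max() available):

	static void shuffle_ints(int *arr, int n)
	{
		int i;

		/* prandom_u32_max(ceil) returns a pseudo-random value in [0, ceil) */
		for (i = n - 1; i > 0; i--)
			swap(arr[i], arr[prandom_u32_max(i + 1)]);
	}
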
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e4ce124ec701..5800b0623ff3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -701,6 +701,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
rq->prev_irq_time += irq_delta;
delta -= irq_delta;
+ psi_account_irqtime(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
if (static_key_false((&paravirt_steal_rq_enabled))) {
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 7f6030091aee..ee2ecc081422 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -181,6 +181,7 @@ static void group_init(struct psi_group *group)
{
int cpu;
+ group->enabled = true;
for_each_possible_cpu(cpu)
seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
group->avg_last_update = sched_clock();
@@ -201,6 +202,7 @@ void __init psi_init(void)
{
if (!psi_enable) {
static_branch_enable(&psi_disabled);
+ static_branch_disable(&psi_cgroups_enabled);
return;
}
@@ -211,7 +213,7 @@ void __init psi_init(void)
group_init(&psi_system);
}
-static bool test_state(unsigned int *tasks, enum psi_states state)
+static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
{
switch (state) {
case PSI_IO_SOME:
@@ -224,9 +226,9 @@ static bool test_state(unsigned int *tasks, enum psi_states state)
return unlikely(tasks[NR_MEMSTALL] &&
tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
case PSI_CPU_SOME:
- return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]);
+ return unlikely(tasks[NR_RUNNING] > oncpu);
case PSI_CPU_FULL:
- return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]);
+ return unlikely(tasks[NR_RUNNING] && !oncpu);
case PSI_NONIDLE:
return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
tasks[NR_RUNNING];
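
A worked example of the new oncpu argument, with illustrative counts:

	/*
	 * tasks[NR_RUNNING] == 3, oncpu == true:
	 *   PSI_CPU_SOME: 3 > 1       -> true  (two runnable tasks are stalled)
	 *   PSI_CPU_FULL: 3 && !true  -> false (the CPU is doing useful work)
	 *
	 * tasks[NR_RUNNING] == 2, oncpu == false (e.g. the CPU is running
	 * a task from some other cgroup):
	 *   PSI_CPU_SOME: 2 > 0       -> true
	 *   PSI_CPU_FULL: 2 && !false -> true  (runnable, but none of ours runs)
	 */
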
@@ -688,35 +690,53 @@ static void psi_group_change(struct psi_group *group, int cpu,
bool wake_clock)
{
struct psi_group_cpu *groupc;
- u32 state_mask = 0;
unsigned int t, m;
enum psi_states s;
+ u32 state_mask;
groupc = per_cpu_ptr(group->pcpu, cpu);
/*
- * First we assess the aggregate resource states this CPU's
- * tasks have been in since the last change, and account any
- * SOME and FULL time these may have resulted in.
- *
- * Then we update the task counts according to the state
+ * First we update the task counts according to the state
* change requested through the @clear and @set bits.
+ *
+ * Then, if cgroup PSI stats accounting is enabled, we
+ * assess the aggregate resource states this CPU's tasks
+ * have been in since the last change, and account any
+ * SOME and FULL time these may have resulted in.
*/
write_seqcount_begin(&groupc->seq);
- record_times(groupc, now);
+ /*
+ * Start with TSK_ONCPU, which doesn't have a corresponding
+ * task count - it's just a boolean flag directly encoded in
+ * the state mask. Clear, set, or carry the current state if
+ * no changes are requested.
+ */
+ if (unlikely(clear & TSK_ONCPU)) {
+ state_mask = 0;
+ clear &= ~TSK_ONCPU;
+ } else if (unlikely(set & TSK_ONCPU)) {
+ state_mask = PSI_ONCPU;
+ set &= ~TSK_ONCPU;
+ } else {
+ state_mask = groupc->state_mask & PSI_ONCPU;
+ }
+ /*
+ * The rest of the state mask is calculated based on the task
+ * counts. Update those first, then construct the mask.
+ */
for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
if (!(m & (1 << t)))
continue;
if (groupc->tasks[t]) {
groupc->tasks[t]--;
} else if (!psi_bug) {
- printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n",
+ printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
cpu, t, groupc->tasks[0],
groupc->tasks[1], groupc->tasks[2],
- groupc->tasks[3], groupc->tasks[4],
- clear, set);
+ groupc->tasks[3], clear, set);
psi_bug = 1;
}
}
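
The TSK_ONCPU handling above condenses to a three-way carry; restated for
clarity (illustrative only):

	/*
	 * clear has TSK_ONCPU  ->  state_mask starts empty
	 * set has TSK_ONCPU    ->  state_mask starts as PSI_ONCPU
	 * neither              ->  carry groupc->state_mask & PSI_ONCPU forward
	 *
	 * The flag never touches groupc->tasks[]; it lives only in the mask.
	 */
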
@@ -725,9 +745,25 @@ static void psi_group_change(struct psi_group *group, int cpu,
if (set & (1 << t))
groupc->tasks[t]++;
- /* Calculate state mask representing active states */
+ if (!group->enabled) {
+ /*
+ * On the first group change after disabling PSI, conclude
+ * the current state and flush its time. This is unlikely
+ * to matter to the user, but aggregation (get_recent_times)
+ * may have already incorporated the live state into times_prev;
+ * avoid a delta sample underflow when PSI is later re-enabled.
+ */
+ if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
+ record_times(groupc, now);
+
+ groupc->state_mask = state_mask;
+
+ write_seqcount_end(&groupc->seq);
+ return;
+ }
+
for (s = 0; s < NR_PSI_STATES; s++) {
- if (test_state(groupc->tasks, s))
+ if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
state_mask |= (1 << s);
}
@@ -739,9 +775,11 @@ static void psi_group_change(struct psi_group *group, int cpu,
* task in a cgroup is in_memstall, the corresponding groupc
* on that cpu is in PSI_MEM_FULL state.
*/
- if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall))
+ if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
state_mask |= (1 << PSI_MEM_FULL);
+ record_times(groupc, now);
+
groupc->state_mask = state_mask;
write_seqcount_end(&groupc->seq);
@@ -753,27 +791,12 @@ static void psi_group_change(struct psi_group *group, int cpu,
schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}
-static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
+static inline struct psi_group *task_psi_group(struct task_struct *task)
{
- if (*iter == &psi_system)
- return NULL;
-
#ifdef CONFIG_CGROUPS
- if (static_branch_likely(&psi_cgroups_enabled)) {
- struct cgroup *cgroup = NULL;
-
- if (!*iter)
- cgroup = task->cgroups->dfl_cgrp;
- else
- cgroup = cgroup_parent(*iter);
-
- if (cgroup && cgroup_parent(cgroup)) {
- *iter = cgroup;
- return cgroup_psi(cgroup);
- }
- }
+ if (static_branch_likely(&psi_cgroups_enabled))
+ return cgroup_psi(task_dfl_cgroup(task));
#endif
- *iter = &psi_system;
return &psi_system;
}
@@ -796,8 +819,6 @@ void psi_task_change(struct task_struct *task, int clear, int set)
{
int cpu = task_cpu(task);
struct psi_group *group;
- bool wake_clock = true;
- void *iter = NULL;
u64 now;
if (!task->pid)
@@ -806,19 +827,11 @@ void psi_task_change(struct task_struct *task, int clear, int set)
psi_flags_change(task, clear, set);
now = cpu_clock(cpu);
- /*
- * Periodic aggregation shuts off if there is a period of no
- * task changes, so we wake it back up if necessary. However,
- * don't do this if the task change is the aggregation worker
- * itself going to sleep, or we'll ping-pong forever.
- */
- if (unlikely((clear & TSK_RUNNING) &&
- (task->flags & PF_WQ_WORKER) &&
- wq_worker_last_func(task) == psi_avgs_work))
- wake_clock = false;
- while ((group = iterate_groups(task, &iter)))
- psi_group_change(group, cpu, clear, set, now, wake_clock);
+ group = task_psi_group(task);
+ do {
+ psi_group_change(group, cpu, clear, set, now, true);
+ } while ((group = group->parent));
}
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
@@ -826,34 +839,30 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
{
struct psi_group *group, *common = NULL;
int cpu = task_cpu(prev);
- void *iter;
u64 now = cpu_clock(cpu);
if (next->pid) {
- bool identical_state;
-
psi_flags_change(next, 0, TSK_ONCPU);
/*
- * When switching between tasks that have an identical
- * runtime state, the cgroup that contains both tasks
- * does not change: we can stop updating the tree once
- * we reach the first common ancestor. Iterate @next's
- * ancestors only until we encounter @prev's ONCPU.
+ * Set TSK_ONCPU on @next's cgroups. If @next shares any
+ * ancestors with @prev, those will already have @prev's
+ * TSK_ONCPU bit set, and we can stop the iteration there.
*/
- identical_state = prev->psi_flags == next->psi_flags;
- iter = NULL;
- while ((group = iterate_groups(next, &iter))) {
- if (identical_state &&
- per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
+ group = task_psi_group(next);
+ do {
+ if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
+ PSI_ONCPU) {
common = group;
break;
}
psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
- }
+ } while ((group = group->parent));
}
if (prev->pid) {
int clear = TSK_ONCPU, set = 0;
+ bool wake_clock = true;
/*
* When we're going to sleep, psi_dequeue() lets us
@@ -867,26 +876,74 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
clear |= TSK_MEMSTALL_RUNNING;
if (prev->in_iowait)
set |= TSK_IOWAIT;
+
+ /*
+ * Periodic aggregation shuts off if there is a period of no
+ * task changes, so we wake it back up if necessary. However,
+ * don't do this if the task change is the aggregation worker
+ * itself going to sleep, or we'll ping-pong forever.
+ */
+ if (unlikely((prev->flags & PF_WQ_WORKER) &&
+ wq_worker_last_func(prev) == psi_avgs_work))
+ wake_clock = false;
}
psi_flags_change(prev, clear, set);
- iter = NULL;
- while ((group = iterate_groups(prev, &iter)) && group != common)
- psi_group_change(group, cpu, clear, set, now, true);
+ group = task_psi_group(prev);
+ do {
+ if (group == common)
+ break;
+ psi_group_change(group, cpu, clear, set, now, wake_clock);
+ } while ((group = group->parent));
/*
- * TSK_ONCPU is handled up to the common ancestor. If we're tasked
- * with dequeuing too, finish that for the rest of the hierarchy.
+ * TSK_ONCPU is handled up to the common ancestor. If there are
+ * any other differences between the two tasks (e.g. prev goes
+ * to sleep, or only one task is memstall), finish propagating
+ * those differences all the way up to the root.
*/
- if (sleep) {
+ if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
clear &= ~TSK_ONCPU;
- for (; group; group = iterate_groups(prev, &iter))
- psi_group_change(group, cpu, clear, set, now, true);
+ for (; group; group = group->parent)
+ psi_group_change(group, cpu, clear, set, now, wake_clock);
}
}
}
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+void psi_account_irqtime(struct task_struct *task, u32 delta)
+{
+ int cpu = task_cpu(task);
+ struct psi_group *group;
+ struct psi_group_cpu *groupc;
+ u64 now;
+
+ if (!task->pid)
+ return;
+
+ now = cpu_clock(cpu);
+
+ group = task_psi_group(task);
+ do {
+ if (!group->enabled)
+ continue;
+
+ groupc = per_cpu_ptr(group->pcpu, cpu);
+
+ write_seqcount_begin(&groupc->seq);
+
+ record_times(groupc, now);
+ groupc->times[PSI_IRQ_FULL] += delta;
+
+ write_seqcount_end(&groupc->seq);
+
+ if (group->poll_states & (1 << PSI_IRQ_FULL))
+ psi_schedule_poll_work(group, 1);
+ } while ((group = group->parent));
+}
+#endif
+
/**
* psi_memstall_enter - mark the beginning of a memory stall section
* @flags: flags to handle nested sections
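
How the new hook is reached, per the sched/core.c hunk earlier in this patch
(a call-path sketch, not new code):

	/*
	 * update_rq_clock_task(rq, delta)
	 *   -> psi_account_irqtime(rq->curr, irq_delta)
	 *        for each group from task_psi_group() up to the root:
	 *          groupc->times[PSI_IRQ_FULL] += delta
	 */
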
@@ -952,7 +1009,7 @@ EXPORT_SYMBOL_GPL(psi_memstall_leave);
#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
- if (static_branch_likely(&psi_disabled))
+ if (!static_branch_likely(&psi_cgroups_enabled))
return 0;
cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
@@ -965,12 +1022,13 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
return -ENOMEM;
}
group_init(cgroup->psi);
+ cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
return 0;
}
void psi_cgroup_free(struct cgroup *cgroup)
{
- if (static_branch_likely(&psi_disabled))
+ if (!static_branch_likely(&psi_cgroups_enabled))
return;
cancel_delayed_work_sync(&cgroup->psi->avgs_work);
@@ -998,7 +1056,7 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
struct rq_flags rf;
struct rq *rq;
- if (static_branch_likely(&psi_disabled)) {
+ if (!static_branch_likely(&psi_cgroups_enabled)) {
/*
* Lame to do this here, but the scheduler cannot be locked
* from the outside, so we move cgroups from inside sched/.
@@ -1046,10 +1104,45 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
task_rq_unlock(rq, task, &rf);
}
+
+void psi_cgroup_restart(struct psi_group *group)
+{
+ int cpu;
+
+ /*
+ * After psi_group->enabled is cleared, we don't actually
+ * stop per-cpu task accounting in each psi_group_cpu;
+ * we only stop the test_state() loop, record_times()
+ * and the averaging worker, see psi_group_change() for details.
+ *
+ * When cgroup PSI is disabled, this function has nothing to sync
+ * since the cgroup pressure files are hidden and each per-cpu
+ * psi_group_cpu sees !psi_group->enabled and only does task
+ * accounting.
+ *
+ * When cgroup PSI is re-enabled, this function uses psi_group_change()
+ * to get the correct state mask from the test_state() loop over
+ * tasks[], and restarts groupc->state_start from now, passing
+ * .clear = .set = 0 since no task state actually changed.
+ */
+ if (!group->enabled)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+ u64 now;
+
+ rq_lock_irq(rq, &rf);
+ now = cpu_clock(cpu);
+ psi_group_change(group, cpu, 0, 0, now, true);
+ rq_unlock_irq(rq, &rf);
+ }
+}
#endif /* CONFIG_CGROUPS */
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
+ bool only_full = false;
int full;
u64 now;
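
A hedged userspace sketch of the knob behind psi_cgroup_restart(): per this
series, writing "1" to a cgroup's cgroup.pressure file re-enables accounting
(the helper is hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int cgroup_psi_enable(const char *cg_dir, int on)
	{
		char path[4096];
		int fd, ok;

		snprintf(path, sizeof(path), "%s/cgroup.pressure", cg_dir);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		ok = write(fd, on ? "1" : "0", 1) == 1;
		close(fd);
		return ok ? 0 : -1;
	}
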
@@ -1064,7 +1157,11 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
group->avg_next_update = update_averages(group, now);
mutex_unlock(&group->avgs_lock);
- for (full = 0; full < 2; full++) {
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ only_full = res == PSI_IRQ;
+#endif
+
+ for (full = 0; full < 2 - only_full; full++) {
unsigned long avg[3] = { 0, };
u64 total = 0;
int w;
@@ -1078,7 +1175,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
}
seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
- full ? "full" : "some",
+ full || only_full ? "full" : "some",
LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
@@ -1106,6 +1203,11 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
else
return ERR_PTR(-EINVAL);
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
+ return ERR_PTR(-EINVAL);
+#endif
+
if (state >= PSI_NONIDLE)
return ERR_PTR(-EINVAL);
@@ -1390,6 +1492,33 @@ static const struct proc_ops psi_cpu_proc_ops = {
.proc_release = psi_fop_release,
};
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+static int psi_irq_show(struct seq_file *m, void *v)
+{
+ return psi_show(m, &psi_system, PSI_IRQ);
+}
+
+static int psi_irq_open(struct inode *inode, struct file *file)
+{
+ return psi_open(file, psi_irq_show);
+}
+
+static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return psi_write(file, user_buf, nbytes, PSI_IRQ);
+}
+
+static const struct proc_ops psi_irq_proc_ops = {
+ .proc_open = psi_irq_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_write = psi_irq_write,
+ .proc_poll = psi_fop_poll,
+ .proc_release = psi_fop_release,
+};
+#endif
+
static int __init psi_proc_init(void)
{
if (psi_enable) {
@@ -1397,6 +1526,9 @@ static int __init psi_proc_init(void)
proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
+#endif
}
return 0;
}
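
A small userspace reader for the new /proc/pressure/irq interface; a sketch
assuming a kernel built with CONFIG_IRQ_TIME_ACCOUNTING (only a "full" line
is emitted, per the only_full logic above):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/pressure/irq", "r");

		if (!f)
			return 1;	/* absent without CONFIG_IRQ_TIME_ACCOUNTING */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "full avg10=0.00 ... total=0" */
		fclose(f);
		return 0;
	}
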
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index baa839c1ba96..84a188913cc9 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -107,6 +107,11 @@ __schedstats_from_se(struct sched_entity *se)
}
#ifdef CONFIG_PSI
+void psi_task_change(struct task_struct *task, int clear, int set);
+void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ bool sleep);
+void psi_account_irqtime(struct task_struct *task, u32 delta);
+
/*
* PSI tracks state that persists across sleeps, such as iowaits and
* memory stalls. As a result, it has to distinguish between sleeps,
@@ -201,6 +206,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
struct task_struct *next,
bool sleep) {}
+static inline void psi_account_irqtime(struct task_struct *task, u32 delta) {}
#endif /* CONFIG_PSI */
#ifdef CONFIG_SCHED_INFO
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cee5da1e54c4..8058bec87ace 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -310,7 +310,7 @@ static void clocksource_verify_choose_cpus(void)
* CPUs that are currently online.
*/
for (i = 1; i < n; i++) {
- cpu = prandom_u32() % nr_cpu_ids;
+ cpu = prandom_u32_max(nr_cpu_ids);
cpu = cpumask_next(cpu - 1, cpu_online_mask);
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(cpu_online_mask);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 73178b0e43a4..3fc7abffc7aa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -231,6 +231,11 @@ config DEBUG_INFO
in the "Debug information" choice below, indicating that debug
information will be generated for build targets.
+# Clang is known to generate .{s,u}leb128 with symbol deltas with DWARF5, which
+# some targets may not support: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
+config AS_HAS_NON_CONST_LEB128
+ def_bool $(as-instr,.uleb128 .Lexpr_end4 - .Lexpr_start3\n.Lexpr_start3:\n.Lexpr_end4:)
+
choice
prompt "Debug information"
depends on DEBUG_KERNEL
@@ -253,6 +258,7 @@ config DEBUG_INFO_NONE
config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
bool "Rely on the toolchain's implicit default DWARF version"
select DEBUG_INFO
+ depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
help
The implicit default version of DWARF debug info produced by a
toolchain changes over time.
@@ -264,7 +270,7 @@ config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
config DEBUG_INFO_DWARF4
bool "Generate DWARF Version 4 debuginfo"
select DEBUG_INFO
- depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)
help
Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
if using clang without clang's integrated assembler, and gdb 7.0+.
@@ -276,7 +282,7 @@ config DEBUG_INFO_DWARF4
config DEBUG_INFO_DWARF5
bool "Generate DWARF Version 5 debuginfo"
select DEBUG_INFO
- depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
help
Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
5.0+ accepts the -gdwarf-5 flag but only had partial support for some
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 05dae05b6cc9..3b9a44008433 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -121,7 +121,7 @@ config KDB_DEFAULT_ENABLE
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
- depends on VT && KGDB_KDB
+ depends on VT && KGDB_KDB && !PARISC
default n
help
KDB can use a PS/2 type keyboard for an input device
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index a72a2c16066e..d4572dbc9145 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -76,7 +76,7 @@ static void cmdline_test_lead_int(struct kunit *test)
int rc = cmdline_test_values[i];
int offset;
- sprintf(in, "%u%s", get_random_int() % 256, str);
+ sprintf(in, "%u%s", get_random_u8(), str);
/* Only first '-' after the number will advance the pointer */
offset = strlen(in) - strlen(str) + !!(rc == 2);
cmdline_do_one_test(test, in, rc, offset);
@@ -94,7 +94,7 @@ static void cmdline_test_tail_int(struct kunit *test)
int rc = strcmp(str, "") ? (strcmp(str, "-") ? 0 : 1) : 1;
int offset;
- sprintf(in, "%s%u", str, get_random_int() % 256);
+ sprintf(in, "%s%u", str, get_random_u8());
/*
* Only first and leading '-' not followed by integer
* will advance the pointer.
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 423784d9c058..96e092de5b72 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -139,7 +139,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return false;
}
- if (attr->probability <= prandom_u32() % 100)
+ if (attr->probability <= prandom_u32_max(100))
return false;
if (!fail_stacktrace(attr))
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 10754586403b..7c3c011abd29 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -174,8 +174,8 @@ static int __init find_bit_test(void)
bitmap_zero(bitmap2, BITMAP_LEN);
while (nbits--) {
- __set_bit(prandom_u32() % BITMAP_LEN, bitmap);
- __set_bit(prandom_u32() % BITMAP_LEN, bitmap2);
+ __set_bit(prandom_u32_max(BITMAP_LEN), bitmap);
+ __set_bit(prandom_u32_max(BITMAP_LEN), bitmap2);
}
test_find_next_bit(bitmap, BITMAP_LEN);
diff --git a/lib/kobject.c b/lib/kobject.c
index 5f0e71ab292c..a0b2dbfcfa23 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -694,7 +694,7 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- unsigned long delay = HZ + HZ * (get_random_int() & 0x3);
+ unsigned long delay = HZ + HZ * prandom_u32_max(4);
pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
kobject_name(kobj), kobj, __func__, kobj->parent, delay);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
diff --git a/lib/random32.c b/lib/random32.c
index d5d9029362cb..32060b852668 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -47,7 +47,7 @@
* @state: pointer to state structure holding seeded state.
*
* This is used for pseudo-randomness with no outside seeding.
- * For more random results, use prandom_u32().
+ * For more random results, use get_random_u32().
*/
u32 prandom_u32_state(struct rnd_state *state)
{
@@ -69,7 +69,7 @@ EXPORT_SYMBOL(prandom_u32_state);
* @bytes: the requested number of bytes
*
* This is used for pseudo-randomness with no outside seeding.
- * For more random results, use prandom_bytes().
+ * For more random results, use get_random_bytes().
*/
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
index d9d1c33aebda..848e7eb5da92 100644
--- a/lib/reed_solomon/test_rslib.c
+++ b/lib/reed_solomon/test_rslib.c
@@ -164,7 +164,7 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
/* Load c with random data and encode */
for (i = 0; i < dlen; i++)
- c[i] = prandom_u32() & nn;
+ c[i] = get_random_u32() & nn;
memset(c + dlen, 0, nroots * sizeof(*c));
encode_rs16(rs, c, dlen, c + dlen, 0);
@@ -178,12 +178,12 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
for (i = 0; i < errs; i++) {
do {
/* Error value must be nonzero */
- errval = prandom_u32() & nn;
+ errval = get_random_u32() & nn;
} while (errval == 0);
do {
/* Must not choose the same location twice */
- errloc = prandom_u32() % len;
+ errloc = prandom_u32_max(len);
} while (errlocs[errloc] != 0);
errlocs[errloc] = 1;
@@ -194,19 +194,19 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
for (i = 0; i < eras; i++) {
do {
/* Must not choose the same location twice */
- errloc = prandom_u32() % len;
+ errloc = prandom_u32_max(len);
} while (errlocs[errloc] != 0);
derrlocs[i] = errloc;
- if (ewsc && (prandom_u32() & 1)) {
+ if (ewsc && prandom_u32_max(2)) {
/* Erasure with the symbol intact */
errlocs[errloc] = 2;
} else {
/* Erasure with corrupted symbol */
do {
/* Error value must be nonzero */
- errval = prandom_u32() & nn;
+ errval = get_random_u32() & nn;
} while (errval == 0);
errlocs[errloc] = 1;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index a8108a962dfd..7280ae8ca88c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -21,7 +21,7 @@ static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
int i;
for_each_possible_cpu(i)
- *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
+ *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth);
}
return 0;
}
@@ -33,7 +33,7 @@ static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
hint = this_cpu_read(*sb->alloc_hint);
if (unlikely(hint >= depth)) {
- hint = depth ? prandom_u32() % depth : 0;
+ hint = depth ? prandom_u32_max(depth) : 0;
this_cpu_write(*sb->alloc_hint, hint);
}
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 437d8e6b7cb1..86fadd3ba08c 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -587,7 +587,7 @@ static int __init test_string_helpers_init(void)
for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
test_string_unescape("unescape", i, false);
test_string_unescape("unescape inplace",
- get_random_int() % (UNESCAPE_ANY + 1), true);
+ prandom_u32_max(UNESCAPE_ANY + 1), true);
/* Without dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
index ed70637a2ffa..e0381b3ec410 100644
--- a/lib/test_fprobe.c
+++ b/lib/test_fprobe.c
@@ -145,7 +145,7 @@ static unsigned long get_ftrace_location(void *func)
static int fprobe_test_init(struct kunit *test)
{
do {
- rand1 = prandom_u32();
+ rand1 = get_random_u32();
} while (rand1 <= div_factor);
target = fprobe_selftest_target;
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 5144899d3c6b..0927f44cd478 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -149,7 +149,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
static void __init test_hexdump_set(int rowsize, bool ascii)
{
size_t d = min_t(size_t, sizeof(data_b), rowsize);
- size_t len = get_random_int() % d + 1;
+ size_t len = prandom_u32_max(d) + 1;
test_hexdump(len, rowsize, 4, ascii);
test_hexdump(len, rowsize, 2, ascii);
@@ -208,11 +208,11 @@ static void __init test_hexdump_overflow(size_t buflen, size_t len,
static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
{
unsigned int i = 0;
- int rs = (get_random_int() % 2 + 1) * 16;
+ int rs = (prandom_u32_max(2) + 1) * 16;
do {
int gs = 1 << i;
- size_t len = get_random_int() % rs + gs;
+ size_t len = prandom_u32_max(rs) + gs;
test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
} while (i++ < 3);
@@ -223,11 +223,11 @@ static int __init test_hexdump_init(void)
unsigned int i;
int rowsize;
- rowsize = (get_random_int() % 2 + 1) * 16;
+ rowsize = (prandom_u32_max(2) + 1) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, false);
- rowsize = (get_random_int() % 2 + 1) * 16;
+ rowsize = (prandom_u32_max(2) + 1) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, true);
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 6a33f6b1b465..67e6f83fe0f8 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -100,6 +100,7 @@ struct dmirror {
struct dmirror_chunk {
struct dev_pagemap pagemap;
struct dmirror_device *mdevice;
+ bool remove;
};
/*
@@ -192,11 +193,15 @@ static int dmirror_fops_release(struct inode *inode, struct file *filp)
return 0;
}
+static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
+{
+ return container_of(page->pgmap, struct dmirror_chunk, pagemap);
+}
+
static struct dmirror_device *dmirror_page_to_device(struct page *page)
{
- return container_of(page->pgmap, struct dmirror_chunk,
- pagemap)->mdevice;
+ return dmirror_page_to_chunk(page)->mdevice;
}
static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
@@ -627,8 +632,8 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
goto error;
}
+ zone_device_page_init(dpage);
dpage->zone_device_data = rpage;
- lock_page(dpage);
return dpage;
error:
@@ -907,7 +912,7 @@ static int dmirror_migrate_to_system(struct dmirror *dmirror,
struct vm_area_struct *vma;
unsigned long src_pfns[64] = { 0 };
unsigned long dst_pfns[64] = { 0 };
- struct migrate_vma args;
+ struct migrate_vma args = { 0 };
unsigned long next;
int ret;
@@ -968,7 +973,7 @@ static int dmirror_migrate_to_device(struct dmirror *dmirror,
unsigned long src_pfns[64] = { 0 };
unsigned long dst_pfns[64] = { 0 };
struct dmirror_bounce bounce;
- struct migrate_vma args;
+ struct migrate_vma args = { 0 };
unsigned long next;
int ret;
@@ -1218,6 +1223,85 @@ static int dmirror_snapshot(struct dmirror *dmirror,
return ret;
}
+static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+{
+ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+ unsigned long npages = end_pfn - start_pfn + 1;
+ unsigned long i;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+
+ src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+ dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+ spage = BACKING_PAGE(spage);
+ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+ lock_page(dpage);
+ copy_highpage(dpage, spage);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ if (src_pfns[i] & MIGRATE_PFN_WRITE)
+ dst_pfns[i] |= MIGRATE_PFN_WRITE;
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kfree(src_pfns);
+ kfree(dst_pfns);
+}
+
+/* Removes free pages from the free list so they can't be re-allocated */
+static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
+{
+ struct dmirror_device *mdevice = devmem->mdevice;
+ struct page *page;
+
+ for (page = mdevice->free_pages; page; page = page->zone_device_data)
+ if (dmirror_page_to_chunk(page) == devmem)
+ mdevice->free_pages = page->zone_device_data;
+}
+
+static void dmirror_device_remove_chunks(struct dmirror_device *mdevice)
+{
+ unsigned int i;
+
+ mutex_lock(&mdevice->devmem_lock);
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ spin_lock(&mdevice->lock);
+ devmem->remove = true;
+ dmirror_remove_free_pages(devmem);
+ spin_unlock(&mdevice->lock);
+
+ dmirror_device_evict_chunk(devmem);
+ memunmap_pages(&devmem->pagemap);
+ if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
+ kfree(devmem);
+ }
+ mdevice->devmem_count = 0;
+ mdevice->devmem_capacity = 0;
+ mdevice->free_pages = NULL;
+ kfree(mdevice->devmem_chunks);
+ mdevice->devmem_chunks = NULL;
+ }
+ mutex_unlock(&mdevice->devmem_lock);
+}
+
static long dmirror_fops_unlocked_ioctl(struct file *filp,
unsigned int command,
unsigned long arg)
@@ -1272,6 +1356,11 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
ret = dmirror_snapshot(dmirror, &cmd);
break;
+ case HMM_DMIRROR_RELEASE:
+ dmirror_device_remove_chunks(dmirror->mdevice);
+ ret = 0;
+ break;
+
default:
return -EINVAL;
}
@@ -1326,15 +1415,19 @@ static void dmirror_devmem_free(struct page *page)
mdevice = dmirror_page_to_device(page);
spin_lock(&mdevice->lock);
- mdevice->cfree++;
- page->zone_device_data = mdevice->free_pages;
- mdevice->free_pages = page;
+
+ /* Return page to our allocator if not freeing the chunk */
+ if (!dmirror_page_to_chunk(page)->remove) {
+ mdevice->cfree++;
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ }
spin_unlock(&mdevice->lock);
}
static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
{
- struct migrate_vma args;
+ struct migrate_vma args = { 0 };
unsigned long src_pfns = 0;
unsigned long dst_pfns = 0;
struct page *rpage;
@@ -1357,6 +1450,7 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
args.dst = &dst_pfns;
args.pgmap_owner = dmirror->mdevice;
args.flags = dmirror_select_device(dmirror);
+ args.fault_page = vmf->page;
if (migrate_vma_setup(&args))
return VM_FAULT_SIGBUS;
@@ -1407,22 +1501,7 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
static void dmirror_device_remove(struct dmirror_device *mdevice)
{
- unsigned int i;
-
- if (mdevice->devmem_chunks) {
- for (i = 0; i < mdevice->devmem_count; i++) {
- struct dmirror_chunk *devmem =
- mdevice->devmem_chunks[i];
-
- memunmap_pages(&devmem->pagemap);
- if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
- release_mem_region(devmem->pagemap.range.start,
- range_len(&devmem->pagemap.range));
- kfree(devmem);
- }
- kfree(mdevice->devmem_chunks);
- }
-
+ dmirror_device_remove_chunks(mdevice);
cdev_device_del(&mdevice->cdevice, &mdevice->device);
}
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index e31d58c9034a..8c818a2cf4f6 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -36,6 +36,7 @@ struct hmm_dmirror_cmd {
#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x04, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_RELEASE _IOWR('H', 0x07, struct hmm_dmirror_cmd)
/*
* Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
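
A hedged userspace sketch of the new ioctl; the device path follows the hmm
selftests convention and is an assumption here:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "test_hmm_uapi.h"

	static int hmm_dmirror_release_all(const char *dev)
	{
		struct hmm_dmirror_cmd cmd = { 0 };
		int ret, fd = open(dev, O_RDWR);	/* e.g. "/dev/hmm_dmirror0" */

		if (fd < 0)
			return -1;
		ret = ioctl(fd, HMM_DMIRROR_RELEASE, &cmd);	/* evict and free chunks */
		close(fd);
		return ret;
	}
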
diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c
index a5edc2ebc947..eeb1d728d974 100644
--- a/lib/test_kprobes.c
+++ b/lib/test_kprobes.c
@@ -341,7 +341,7 @@ static int kprobes_test_init(struct kunit *test)
stacktrace_driver = kprobe_stacktrace_driver;
do {
- rand1 = prandom_u32();
+ rand1 = get_random_u32();
} while (rand1 <= div_factor);
return 0;
}
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index ade7a1ea0c8e..19ff229b9c3a 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -71,7 +71,7 @@ static void list_sort_test(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
- el->value = prandom_u32() % (TEST_LIST_LEN / 3);
+ el->value = prandom_u32_max(TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index c95db11a6906..60e1984c060f 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -67,17 +67,24 @@ static int __init do_alloc_pages_order(int order, int *total_failures)
size_t size = PAGE_SIZE << order;
page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ goto err;
buf = page_address(page);
fill_with_garbage(buf, size);
__free_pages(page, order);
page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ goto err;
buf = page_address(page);
if (count_nonzero_bytes(buf, size))
(*total_failures)++;
fill_with_garbage(buf, size);
__free_pages(page, order);
return 1;
+err:
+ (*total_failures)++;
+ return 1;
}
/* Test the page allocator by calling alloc_pages with different orders. */
@@ -100,15 +107,22 @@ static int __init do_kmalloc_size(size_t size, int *total_failures)
void *buf;
buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto err;
fill_with_garbage(buf, size);
kfree(buf);
buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto err;
if (count_nonzero_bytes(buf, size))
(*total_failures)++;
fill_with_garbage(buf, size);
kfree(buf);
return 1;
+err:
+ (*total_failures)++;
+ return 1;
}
/* Test vmalloc() with given parameters. */
@@ -117,15 +131,22 @@ static int __init do_vmalloc_size(size_t size, int *total_failures)
void *buf;
buf = vmalloc(size);
+ if (!buf)
+ goto err;
fill_with_garbage(buf, size);
vfree(buf);
buf = vmalloc(size);
+ if (!buf)
+ goto err;
if (count_nonzero_bytes(buf, size))
(*total_failures)++;
fill_with_garbage(buf, size);
vfree(buf);
return 1;
+err:
+ (*total_failures)++;
+ return 1;
}
/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
index d19c8080fd4d..7b01b4387cfb 100644
--- a/lib/test_min_heap.c
+++ b/lib/test_min_heap.c
@@ -83,7 +83,7 @@ static __init int test_heapify_all(bool min_heap)
/* Test with randomly generated values. */
heap.nr = ARRAY_SIZE(values);
for (i = 0; i < heap.nr; i++)
- values[i] = get_random_int();
+ values[i] = get_random_u32();
min_heapify_all(&heap, &funcs);
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -116,7 +116,7 @@ static __init int test_heap_push(bool min_heap)
/* Test with randomly generated values. */
while (heap.nr < heap.size) {
- temp = get_random_int();
+ temp = get_random_u32();
min_heap_push(&heap, &temp, &funcs);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -158,7 +158,7 @@ static __init int test_heap_pop_push(bool min_heap)
/* Test with randomly generated values. */
for (i = 0; i < ARRAY_SIZE(data); i++) {
- temp = get_random_int();
+ temp = get_random_u32();
min_heap_pop_push(&heap, &temp, &funcs);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index da137939a410..c0c957c50635 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -157,7 +157,7 @@ static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
int err;
if (should_create_root)
- prandom_bytes(world->next_root_buf,
+ get_random_bytes(world->next_root_buf,
sizeof(world->next_root_buf));
objagg_obj = world_obj_get(world, objagg, key_id);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 5a1dd4736b56..b358a74ed7ed 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -291,7 +291,7 @@ static int __init test_rhltable(unsigned int entries)
if (WARN_ON(err))
goto out_free;
- k = prandom_u32();
+ k = get_random_u32();
ret = 0;
for (i = 0; i < entries; i++) {
rhl_test_objects[i].value.id = k;
@@ -369,12 +369,12 @@ static int __init test_rhltable(unsigned int entries)
pr_info("test %d random rhlist add/delete operations\n", entries);
for (j = 0; j < entries; j++) {
u32 i = prandom_u32_max(entries);
- u32 prand = prandom_u32();
+ u32 prand = get_random_u32();
cond_resched();
if (prand == 0)
- prand = prandom_u32();
+ prand = get_random_u32();
if (prand & 1) {
prand >>= 1;
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 4f2f2d1bac56..cf7780572f5b 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -80,7 +80,7 @@ static int random_size_align_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- rnd = prandom_u32();
+ rnd = get_random_u8();
/*
* Maximum 1024 pages, if PAGE_SIZE is 4096.
@@ -151,9 +151,7 @@ static int random_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- n = prandom_u32();
- n = (n % 100) + 1;
-
+ n = prandom_u32_max(100) + 1;
p = vmalloc(n * PAGE_SIZE);
if (!p)
@@ -293,16 +291,12 @@ pcpu_alloc_test(void)
return -1;
for (i = 0; i < 35000; i++) {
- unsigned int r;
-
- r = prandom_u32();
- size = (r % (PAGE_SIZE / 4)) + 1;
+ size = prandom_u32_max(PAGE_SIZE / 4) + 1;
/*
* Maximum PAGE_SIZE
*/
- r = prandom_u32();
- align = 1 << ((r % 11) + 1);
+ align = 1 << (prandom_u32_max(11) + 1);
pcpu[i] = __alloc_percpu(size, align);
if (!pcpu[i])
@@ -393,14 +387,11 @@ static struct test_driver {
static void shuffle_array(int *arr, int n)
{
- unsigned int rnd;
int i, j;
for (i = n - 1; i > 0; i--) {
- rnd = prandom_u32();
-
/* Cut the range. */
- j = rnd % i;
+ j = prandom_u32_max(i);
/* Swap indexes. */
swap(arr[i], arr[j]);
diff --git a/lib/uuid.c b/lib/uuid.c
index 562d53977cab..e309b4c5be3d 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(generate_random_guid);
static void __uuid_gen_common(__u8 b[16])
{
- prandom_bytes(b, 16);
+ get_random_bytes(b, 16);
/* reversion 0b10 */
b[8] = (b[8] & 0x3F) | 0x80;
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 2dd02c4683c4..c51f7f545afe 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1847,7 +1847,6 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
pfn = cc->zone->zone_start_pfn;
cc->fast_search_fail = 0;
found_block = true;
- set_pageblock_skip(freepage);
break;
}
}
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 8e1ab38d0f1f..36d098d06c55 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -491,7 +491,7 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t)
- sz += r->ar.end - r->ar.start;
+ sz += damon_sz_region(r);
}
if (ctx->attrs.min_nr_regions)
@@ -674,7 +674,7 @@ static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
unsigned long sz;
- sz = r->ar.end - r->ar.start;
+ sz = damon_sz_region(r);
return s->pattern.min_sz_region <= sz &&
sz <= s->pattern.max_sz_region &&
s->pattern.min_nr_accesses <= r->nr_accesses &&
@@ -702,7 +702,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
damon_for_each_scheme(s, c) {
struct damos_quota *quota = &s->quota;
- unsigned long sz = r->ar.end - r->ar.start;
+ unsigned long sz = damon_sz_region(r);
struct timespec64 begin, end;
unsigned long sz_applied = 0;
@@ -731,14 +731,14 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
sz = ALIGN_DOWN(quota->charge_addr_from -
r->ar.start, DAMON_MIN_REGION);
if (!sz) {
- if (r->ar.end - r->ar.start <=
- DAMON_MIN_REGION)
+ if (damon_sz_region(r) <=
+ DAMON_MIN_REGION)
continue;
sz = DAMON_MIN_REGION;
}
damon_split_region_at(t, r, sz);
r = damon_next_region(r);
- sz = r->ar.end - r->ar.start;
+ sz = damon_sz_region(r);
}
quota->charge_target_from = NULL;
quota->charge_addr_from = 0;
@@ -843,8 +843,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
continue;
score = c->ops.get_scheme_score(
c, t, r, s);
- quota->histogram[score] +=
- r->ar.end - r->ar.start;
+ quota->histogram[score] += damon_sz_region(r);
if (score > max_score)
max_score = score;
}
@@ -865,18 +864,13 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
}
}
-static inline unsigned long sz_damon_region(struct damon_region *r)
-{
- return r->ar.end - r->ar.start;
-}
-
/*
* Merge two adjacent regions into one region
*/
static void damon_merge_two_regions(struct damon_target *t,
struct damon_region *l, struct damon_region *r)
{
- unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r);
+ unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r);
l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
(sz_l + sz_r);
@@ -905,7 +899,7 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
if (prev && prev->ar.end == r->ar.start &&
abs(prev->nr_accesses - r->nr_accesses) <= thres &&
- sz_damon_region(prev) + sz_damon_region(r) <= sz_limit)
+ damon_sz_region(prev) + damon_sz_region(r) <= sz_limit)
damon_merge_two_regions(t, prev, r);
else
prev = r;
@@ -963,7 +957,7 @@ static void damon_split_regions_of(struct damon_target *t, int nr_subs)
int i;
damon_for_each_region_safe(r, next, t) {
- sz_region = r->ar.end - r->ar.start;
+ sz_region = damon_sz_region(r);
for (i = 0; i < nr_subs - 1 &&
sz_region > 2 * DAMON_MIN_REGION; i++) {
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index ea94e0b2c311..15f03df66db6 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -72,7 +72,7 @@ static int damon_va_evenly_split_region(struct damon_target *t,
return -EINVAL;
orig_end = r->ar.end;
- sz_orig = r->ar.end - r->ar.start;
+ sz_orig = damon_sz_region(r);
sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);
if (!sz_piece)
@@ -618,7 +618,7 @@ static unsigned long damos_madvise(struct damon_target *target,
{
struct mm_struct *mm;
unsigned long start = PAGE_ALIGN(r->ar.start);
- unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
+ unsigned long len = PAGE_ALIGN(damon_sz_region(r));
unsigned long applied;
mm = damon_get_mm(target);
diff --git a/mm/highmem.c b/mm/highmem.c
index c707d7202d5f..db251e77f98f 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,6 +30,17 @@
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>
+#ifdef CONFIG_KMAP_LOCAL
+static inline int kmap_local_calc_idx(int idx)
+{
+ return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx)
+#endif
+#endif /* CONFIG_KMAP_LOCAL */
+
/*
* Virtual_count is not a pure "count".
* 0 means that it is not mapped, and has not been mapped
@@ -142,12 +153,29 @@ pte_t *pkmap_page_table;
struct page *__kmap_to_page(void *vaddr)
{
+ unsigned long base = (unsigned long) vaddr & PAGE_MASK;
+ struct kmap_ctrl *kctrl = &current->kmap_ctrl;
unsigned long addr = (unsigned long)vaddr;
+ int i;
+
+ /* kmap() mappings */
+ if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
+ addr < PKMAP_ADDR(LAST_PKMAP)))
+ return pte_page(pkmap_page_table[PKMAP_NR(addr)]);
- if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
- int i = PKMAP_NR(addr);
+ /* kmap_local_page() mappings */
+ if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
+ base < __fix_to_virt(FIX_KMAP_BEGIN))) {
+ for (i = 0; i < kctrl->idx; i++) {
+ unsigned long base_addr;
+ int idx;
- return pte_page(pkmap_page_table[i]);
+ idx = arch_kmap_local_map_idx(i, pte_pfn(kctrl->pteval[i]));
+ base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+ if (base_addr == base)
+ return pte_page(kctrl->pteval[i]);
+ }
}
return virt_to_page(vaddr);
@@ -462,10 +490,6 @@ static inline void kmap_local_idx_pop(void)
# define arch_kmap_local_post_unmap(vaddr) do { } while (0)
#endif
-#ifndef arch_kmap_local_map_idx
-#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx)
-#endif
-
#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx)
#endif
@@ -494,11 +518,6 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr)
return false;
}
-static inline int kmap_local_calc_idx(int idx)
-{
- return idx + KM_MAX_IDX * smp_processor_id();
-}
-
static pte_t *__kmap_pte;
static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 57b7b0b5d9eb..b586cdd75930 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5096,6 +5096,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
* unmapped and its refcount is dropped, so just clear pte here.
*/
if (unlikely(!pte_present(pte))) {
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
/*
* If the pte was wr-protected by uffd-wp in any of the
* swap forms, meanwhile the caller does not want to
@@ -5107,6 +5108,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
set_huge_pte_at(mm, address, ptep,
make_pte_marker(PTE_MARKER_UFFD_WP));
else
+#endif
huge_pte_clear(mm, address, ptep, sz);
spin_unlock(ptl);
continue;
@@ -5135,11 +5137,13 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
if (huge_pte_dirty(pte))
set_page_dirty(page);
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
/* Leave a uffd-wp pte marker if needed */
if (huge_pte_uffd_wp(pte) &&
!(zap_flags & ZAP_FLAG_DROP_MARKER))
set_huge_pte_at(mm, address, ptep,
make_pte_marker(PTE_MARKER_UFFD_WP));
+#endif
hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page, vma, true);
@@ -5531,6 +5535,23 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
return handle_userfault(&vmf, reason);
}
+/*
+ * Recheck pte with pgtable lock. Returns true if pte didn't change, or
+ * false if pte changed or is changing.
+ */
+static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
+ pte_t *ptep, pte_t old_pte)
+{
+ spinlock_t *ptl;
+ bool same;
+
+ ptl = huge_pte_lock(h, mm, ptep);
+ same = pte_same(huge_ptep_get(ptep), old_pte);
+ spin_unlock(ptl);
+
+ return same;
+}
+
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
struct vm_area_struct *vma,
struct address_space *mapping, pgoff_t idx,
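
The helper enables a simple snapshot-then-recheck pattern; condensed from the
callers below (illustrative):

	/*
	 * old_pte = huge_ptep_get(ptep);            // lockless snapshot
	 * ... sleepable work (allocation, lookup) ...
	 * if (!hugetlb_pte_stable(h, mm, ptep, old_pte))
	 *         return 0;                         // raced: fault gets retried
	 */
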
@@ -5571,10 +5592,33 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
if (idx >= size)
goto out;
/* Check for page in userfault range */
- if (userfaultfd_missing(vma))
- return hugetlb_handle_userfault(vma, mapping, idx,
- flags, haddr, address,
- VM_UFFD_MISSING);
+ if (userfaultfd_missing(vma)) {
+ /*
+ * Since hugetlb_no_page() was examining the pte
+ * without the pgtable lock, we need to re-test under
+ * the lock because the pte may not be stable and could
+ * have changed from under us. Try to detect ptes that
+ * have changed or are mid-change, and retry properly
+ * when needed.
+ *
+ * Note that userfaultfd is actually fine with false
+ * positives (e.g. caused by a changed pte), but not
+ * with wrong logical events (e.g. caused by reading
+ * a pte while it is changing). The latter can confuse
+ * userspace, so strictness is very much preferred.
+ * E.g., a MISSING event should never happen on a page
+ * after UFFDIO_COPY has correctly installed it and
+ * returned.
+ */
+ if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
+ ret = 0;
+ goto out;
+ }
+
+ return hugetlb_handle_userfault(vma, mapping, idx, flags,
+ haddr, address,
+ VM_UFFD_MISSING);
+ }
page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) {
@@ -5590,11 +5634,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
* here. Before returning error, get ptl and make
* sure there really is no pte entry.
*/
- ptl = huge_pte_lock(h, mm, ptep);
- ret = 0;
- if (huge_pte_none(huge_ptep_get(ptep)))
+ if (hugetlb_pte_stable(h, mm, ptep, old_pte))
ret = vmf_error(PTR_ERR(page));
- spin_unlock(ptl);
+ else
+ ret = 0;
goto out;
}
clear_huge_page(page, address, pages_per_huge_page(h));
@@ -5640,9 +5683,14 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
if (userfaultfd_minor(vma)) {
unlock_page(page);
put_page(page);
- return hugetlb_handle_userfault(vma, mapping, idx,
- flags, haddr, address,
- VM_UFFD_MINOR);
+ /* See comment in userfaultfd_missing() block above */
+ if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
+ ret = 0;
+ goto out;
+ }
+ return hugetlb_handle_userfault(vma, mapping, idx, flags,
+ haddr, address,
+ VM_UFFD_MINOR);
}
}
@@ -6804,7 +6852,7 @@ void hugetlb_vma_lock_release(struct kref *kref)
kfree(vma_lock);
}
-void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
struct vm_area_struct *vma = vma_lock->vma;
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index f25692def781..0d59098f0876 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -295,6 +295,9 @@ static void krealloc_more_oob_helper(struct kunit *test,
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ /* Suppress -Warray-bounds warnings. */
+ OPTIMIZER_HIDE_VAR(ptr2);
+
/* All offsets up to size2 must be accessible. */
ptr2[size1 - 1] = 'x';
ptr2[size1] = 'x';
@@ -327,6 +330,9 @@ static void krealloc_less_oob_helper(struct kunit *test,
ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ /* Suppress -Warray-bounds warnings. */
+ OPTIMIZER_HIDE_VAR(ptr2);
+
/* Must be accessible for all modes. */
ptr2[size2 - 1] = 'x';
@@ -540,13 +546,14 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
{
char *ptr;
size_t size = 64;
- volatile size_t invalid_size = size;
+ size_t invalid_size = size;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset((char *)ptr, 0, 64);
OPTIMIZER_HIDE_VAR(ptr);
+ OPTIMIZER_HIDE_VAR(invalid_size);
KUNIT_EXPECT_KASAN_FAIL(test,
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
@@ -1292,7 +1299,7 @@ static void match_all_not_assigned(struct kunit *test)
KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
for (i = 0; i < 256; i++) {
- size = (get_random_int() % 1024) + 1;
+ size = prandom_u32_max(1024) + 1;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
@@ -1301,7 +1308,7 @@ static void match_all_not_assigned(struct kunit *test)
}
for (i = 0; i < 256; i++) {
- order = (get_random_int() % 4) + 1;
+ order = prandom_u32_max(4) + 1;
pages = alloc_pages(GFP_KERNEL, order);
ptr = page_address(pages);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -1314,7 +1321,7 @@ static void match_all_not_assigned(struct kunit *test)
return;
for (i = 0; i < 256; i++) {
- size = (get_random_int() % 1024) + 1;
+ size = prandom_u32_max(1024) + 1;
ptr = vmalloc(size);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
diff --git a/mm/memory.c b/mm/memory.c
index df678fa30cdb..f88c351aecd4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1393,10 +1393,12 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte,
struct zap_details *details, pte_t pteval)
{
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
if (zap_drop_file_uffd_wp(details))
return;
pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
+#endif
}
static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -3748,7 +3750,21 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
ret = remove_device_exclusive_entry(vmf);
} else if (is_device_private_entry(entry)) {
vmf->page = pfn_swap_entry_to_page(entry);
- ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
+ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+ vmf->address, &vmf->ptl);
+ if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
+ spin_unlock(vmf->ptl);
+ goto out;
+ }
+
+ /*
+ * Get a page reference while we know the page can't be
+ * freed.
+ */
+ get_page(vmf->page);
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ vmf->page->pgmap->ops->migrate_to_ram(vmf);
+ put_page(vmf->page);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
} else if (is_swapin_error_entry(entry)) {
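
The device-private path above now pins the page before dropping the lock;
the ordering, condensed (illustrative):

	/*
	 * pte_offset_map_lock() -> pte_same() recheck
	 *   -> get_page(vmf->page)            // page can't be freed from here on
	 *   -> pte_unmap_unlock()
	 *   -> pgmap->ops->migrate_to_ram(vmf)
	 *   -> put_page(vmf->page)
	 */
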
@@ -4118,7 +4134,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (!pte_none(*vmf->pte)) {
- update_mmu_cache(vma, vmf->address, vmf->pte);
+ update_mmu_tlb(vma, vmf->address, vmf->pte);
goto release;
}
diff --git a/mm/memremap.c b/mm/memremap.c
index 25029a474d30..421bec3a29ee 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -138,8 +138,11 @@ void memunmap_pages(struct dev_pagemap *pgmap)
int i;
percpu_ref_kill(&pgmap->ref);
- for (i = 0; i < pgmap->nr_range; i++)
- percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
+ if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
+ pgmap->type != MEMORY_DEVICE_COHERENT)
+ for (i = 0; i < pgmap->nr_range; i++)
+ percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
+
wait_for_completion(&pgmap->done);
for (i = 0; i < pgmap->nr_range; i++)
@@ -264,7 +267,9 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
PHYS_PFN(range->start),
PHYS_PFN(range_len(range)), pgmap);
- percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
+ if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
+ pgmap->type != MEMORY_DEVICE_COHERENT)
+ percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
return 0;
err_add_memory:
@@ -502,11 +507,28 @@ void free_zone_device_page(struct page *page)
page->mapping = NULL;
page->pgmap->ops->page_free(page);
+ if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
+ page->pgmap->type != MEMORY_DEVICE_COHERENT)
+ /*
+ * Reset the page count to 1 to prepare for handing out the page
+ * again.
+ */
+ set_page_count(page, 1);
+ else
+ put_dev_pagemap(page->pgmap);
+}
+
+void zone_device_page_init(struct page *page)
+{
/*
- * Reset the page count to 1 to prepare for handing out the page again.
+ * Drivers shouldn't be allocating pages after calling
+ * memunmap_pages().
*/
+ WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
set_page_count(page, 1);
+ lock_page(page);
}
+EXPORT_SYMBOL_GPL(zone_device_page_init);
#ifdef CONFIG_FS_DAX
bool __put_devmap_managed_page_refs(struct page *page, int refs)
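
A driver-side sketch of the new helper; my_dev and my_pick_free_page() are
illustrative, not kernel APIs (lib/test_hmm.c above shows a real adoption):

	static struct page *my_devmem_alloc(struct my_dev *d)
	{
		struct page *page = my_pick_free_page(d);	/* driver-specific free list */

		if (!page)
			return NULL;
		/* Takes a pgmap reference, resets the refcount to 1 and locks the page. */
		zone_device_page_init(page);
		return page;
	}
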
diff --git a/mm/migrate.c b/mm/migrate.c
index c228afba0963..1379e1912772 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -625,6 +625,25 @@ EXPORT_SYMBOL(folio_migrate_copy);
* Migration functions
***********************************************************/
+int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode, int extra_count)
+{
+ int rc;
+
+ BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
+
+ rc = folio_migrate_mapping(mapping, dst, src, extra_count);
+
+ if (rc != MIGRATEPAGE_SUCCESS)
+ return rc;
+
+ if (mode != MIGRATE_SYNC_NO_COPY)
+ folio_migrate_copy(dst, src);
+ else
+ folio_migrate_flags(dst, src);
+ return MIGRATEPAGE_SUCCESS;
+}
+
/**
* migrate_folio() - Simple folio migration.
* @mapping: The address_space containing the folio.
@@ -640,20 +659,7 @@ EXPORT_SYMBOL(folio_migrate_copy);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode)
{
- int rc;
-
- BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
-
- rc = folio_migrate_mapping(mapping, dst, src, 0);
-
- if (rc != MIGRATEPAGE_SUCCESS)
- return rc;
-
- if (mode != MIGRATE_SYNC_NO_COPY)
- folio_migrate_copy(dst, src);
- else
- folio_migrate_flags(dst, src);
- return MIGRATEPAGE_SUCCESS;
+ return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 5ab6ab9d2ed8..6fa682eef7a0 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -325,14 +325,14 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
* folio_migrate_mapping(), except that here we allow migration of a
* ZONE_DEVICE page.
*/
-static bool migrate_vma_check_page(struct page *page)
+static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
/*
* One extra ref because caller holds an extra reference, either from
* isolate_lru_page() for a regular page, or migrate_vma_collect() for
* a device page.
*/
- int extra = 1;
+ int extra = 1 + (page == fault_page);
/*
* FIXME support THP (transparent huge page), it is a bit more complex to
@@ -357,26 +357,20 @@ static bool migrate_vma_check_page(struct page *page)
}
/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
- * special migration pte entry and check if it has been pinned. Pinned pages are
- * restored because we cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
+ * Unmaps pages for migration. Returns number of unmapped pages.
*/
-static void migrate_vma_unmap(struct migrate_vma *migrate)
+static unsigned long migrate_device_unmap(unsigned long *src_pfns,
+ unsigned long npages,
+ struct page *fault_page)
{
- const unsigned long npages = migrate->npages;
unsigned long i, restore = 0;
bool allow_drain = true;
+ unsigned long unmapped = 0;
lru_add_drain();
for (i = 0; i < npages; i++) {
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
+ struct page *page = migrate_pfn_to_page(src_pfns[i]);
struct folio *folio;
if (!page)
@@ -391,8 +385,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
}
if (isolate_lru_page(page)) {
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
+ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
restore++;
continue;
}
@@ -405,34 +398,55 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
if (folio_mapped(folio))
try_to_migrate(folio, 0);
- if (page_mapped(page) || !migrate_vma_check_page(page)) {
+ if (page_mapped(page) ||
+ !migrate_vma_check_page(page, fault_page)) {
if (!is_zone_device_page(page)) {
get_page(page);
putback_lru_page(page);
}
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
+ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
restore++;
continue;
}
+
+ unmapped++;
}
for (i = 0; i < npages && restore; i++) {
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
+ struct page *page = migrate_pfn_to_page(src_pfns[i]);
struct folio *folio;
- if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
+ if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
continue;
folio = page_folio(page);
remove_migration_ptes(folio, folio, false);
- migrate->src[i] = 0;
+ src_pfns[i] = 0;
folio_unlock(folio);
folio_put(folio);
restore--;
}
+
+ return unmapped;
+}
+
+/*
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
+ * @migrate: migrate struct containing all migration information
+ *
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
+ */
+static void migrate_vma_unmap(struct migrate_vma *migrate)
+{
+ migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
+ migrate->fault_page);
}
/**
@@ -517,6 +531,8 @@ int migrate_vma_setup(struct migrate_vma *args)
return -EINVAL;
if (!args->src || !args->dst)
return -EINVAL;
+ if (args->fault_page && !is_device_private_page(args->fault_page))
+ return -EINVAL;
memset(args->src, 0, sizeof(*args->src) * nr_pages);
args->cpages = 0;
@@ -677,42 +693,38 @@ abort:
*src &= ~MIGRATE_PFN_MIGRATE;
}
-/**
- * migrate_vma_pages() - migrate meta-data from src page to dst page
- * @migrate: migrate struct containing all migration information
- *
- * This migrates struct page meta-data from source struct page to destination
- * struct page. This effectively finishes the migration from source page to the
- * destination page.
- */
-void migrate_vma_pages(struct migrate_vma *migrate)
+static void __migrate_device_pages(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages,
+ struct migrate_vma *migrate)
{
- const unsigned long npages = migrate->npages;
- const unsigned long start = migrate->start;
struct mmu_notifier_range range;
- unsigned long addr, i;
+ unsigned long i;
bool notified = false;
- for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
- struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
+ for (i = 0; i < npages; i++) {
+ struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
+ struct page *page = migrate_pfn_to_page(src_pfns[i]);
struct address_space *mapping;
int r;
if (!newpage) {
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
continue;
}
if (!page) {
+ unsigned long addr;
+
+ if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
/*
* The only time there is no vma is when called from
* migrate_device_coherent_page(). However, this isn't
* called if the page could not be unmapped.
*/
- VM_BUG_ON(!migrate->vma);
- if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- continue;
+ VM_BUG_ON(!migrate);
+ addr = migrate->start + i*PAGE_SIZE;
if (!notified) {
notified = true;
@@ -723,7 +735,7 @@ void migrate_vma_pages(struct migrate_vma *migrate)
mmu_notifier_invalidate_range_start(&range);
}
migrate_vma_insert_page(migrate, addr, newpage,
- &migrate->src[i]);
+ &src_pfns[i]);
continue;
}
@@ -736,21 +748,26 @@ void migrate_vma_pages(struct migrate_vma *migrate)
* device private or coherent memory.
*/
if (mapping) {
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
continue;
}
} else if (is_zone_device_page(newpage)) {
/*
* Other types of ZONE_DEVICE page are not supported.
*/
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
continue;
}
- r = migrate_folio(mapping, page_folio(newpage),
- page_folio(page), MIGRATE_SYNC_NO_COPY);
+ if (migrate && migrate->fault_page == page)
+ r = migrate_folio_extra(mapping, page_folio(newpage),
+ page_folio(page),
+ MIGRATE_SYNC_NO_COPY, 1);
+ else
+ r = migrate_folio(mapping, page_folio(newpage),
+ page_folio(page), MIGRATE_SYNC_NO_COPY);
if (r != MIGRATEPAGE_SUCCESS)
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
}
/*
@@ -761,28 +778,56 @@ void migrate_vma_pages(struct migrate_vma *migrate)
if (notified)
mmu_notifier_invalidate_range_only_end(&range);
}
-EXPORT_SYMBOL(migrate_vma_pages);
/**
- * migrate_vma_finalize() - restore CPU page table entry
+ * migrate_device_pages() - migrate meta-data from src page to dst page
+ * @src_pfns: src_pfns returned from migrate_device_range()
+ * @dst_pfns: array of pfns allocated by the driver to migrate memory to
+ * @npages: number of pages in the range
+ *
+ * Equivalent to migrate_vma_pages(). This is called to migrate struct page
+ * meta-data from source struct page to destination.
+ */
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages)
+{
+ __migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
+}
+EXPORT_SYMBOL(migrate_device_pages);
+
+/**
+ * migrate_vma_pages() - migrate meta-data from src page to dst page
* @migrate: migrate struct containing all migration information
*
- * This replaces the special migration pte entry with either a mapping to the
- * new page if migration was successful for that page, or to the original page
- * otherwise.
+ * This migrates struct page meta-data from source struct page to destination
+ * struct page. This effectively finishes the migration from source page to the
+ * destination page.
+ */
+void migrate_vma_pages(struct migrate_vma *migrate)
+{
+ __migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
+}
+EXPORT_SYMBOL(migrate_vma_pages);
+
+/*
+ * migrate_device_finalize() - complete page migration
+ * @src_pfns: src_pfns returned from migrate_device_range()
+ * @dst_pfns: array of pfns allocated by the driver to migrate memory to
+ * @npages: number of pages in the range
*
- * This also unlocks the pages and puts them back on the lru, or drops the extra
- * refcount, for device pages.
+ * Completes migration of the page by removing special migration entries.
+ * Drivers must ensure copying of page data is complete and visible to the CPU
+ * before calling this.
*/
-void migrate_vma_finalize(struct migrate_vma *migrate)
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages)
{
- const unsigned long npages = migrate->npages;
unsigned long i;
for (i = 0; i < npages; i++) {
struct folio *dst, *src;
- struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
+ struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
+ struct page *page = migrate_pfn_to_page(src_pfns[i]);
if (!page) {
if (newpage) {
@@ -792,7 +837,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
continue;
}
- if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
+ if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
if (newpage) {
unlock_page(newpage);
put_page(newpage);
@@ -819,8 +864,72 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
}
}
}
+EXPORT_SYMBOL(migrate_device_finalize);
+
+/**
+ * migrate_vma_finalize() - restore CPU page table entry
+ * @migrate: migrate struct containing all migration information
+ *
+ * This replaces the special migration pte entry with either a mapping to the
+ * new page if migration was successful for that page, or to the original page
+ * otherwise.
+ *
+ * This also unlocks the pages and puts them back on the lru, or drops the extra
+ * refcount, for device pages.
+ */
+void migrate_vma_finalize(struct migrate_vma *migrate)
+{
+ migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
+}
EXPORT_SYMBOL(migrate_vma_finalize);
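The migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize() trio above is always driven in that order. A condensed, hedged sketch of a device-private fault handler using it, modelled on in-tree users such as lib/test_hmm.c; the destination allocation and copy are placeholders, and fault_page is the field this series adds so that the faulting page's extra reference is accounted for by migrate_vma_check_page():

/*
 * Hedged sketch: three-phase migrate_vma flow from a driver's
 * migrate_to_ram() callback. my_owner is a hypothetical pgmap owner.
 */
static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= my_owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.fault_page	= vmf->page,	/* new in this series */
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	if (src & MIGRATE_PFN_MIGRATE) {
		struct page *dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma,
						    vmf->address);
		if (dpage) {
			lock_page(dpage);
			copy_highpage(dpage, migrate_pfn_to_page(src));
			dst = migrate_pfn(page_to_pfn(dpage));
		}
	}

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}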
+/**
+ * migrate_device_range() - migrate device private pfns to normal memory.
+ * @src_pfns: array large enough to hold migrating source device private pfns.
+ * @start: starting pfn in the range to migrate.
+ * @npages: number of pages to migrate.
+ *
+ * migrate_device_range() is similar in concept to migrate_vma_setup() except
+ * that instead of looking up pages based on virtual address mappings, a range
+ * of device pfns to be migrated to system memory is supplied directly.
+ *
+ * This is useful when a driver needs to free device memory but doesn't know the
+ * virtual mappings of every page that may be in device memory. For example this
+ * is often the case when a driver is being unloaded or unbound from a device.
+ *
+ * Like migrate_vma_setup() this function will take a reference and lock any
+ * migrating pages that aren't free before unmapping them. Drivers may then
+ * allocate destination pages and start copying data from the device to CPU
+ * memory before calling migrate_device_pages().
+ */
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages)
+{
+ unsigned long i, pfn;
+
+ for (pfn = start, i = 0; i < npages; pfn++, i++) {
+ struct page *page = pfn_to_page(pfn);
+
+ if (!get_page_unless_zero(page)) {
+ src_pfns[i] = 0;
+ continue;
+ }
+
+ if (!trylock_page(page)) {
+ src_pfns[i] = 0;
+ put_page(page);
+ continue;
+ }
+
+ src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
+ }
+
+ migrate_device_unmap(src_pfns, npages, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL(migrate_device_range);
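migrate_device_range() pairs with migrate_device_pages() and migrate_device_finalize() above to form the pfn-based counterpart of the migrate_vma_*() flow. A hedged sketch of the driver-unbind path the kernel-doc describes; my_copy_from_device() is a hypothetical DMA helper, everything else is API from this series:

/*
 * Hedged sketch: evacuate a range of device-private memory to system
 * RAM, e.g. when the driver is being unbound from the device.
 */
static void my_evacuate_device_memory(unsigned long start_pfn,
				      unsigned long npages)
{
	unsigned long *src, *dst;
	unsigned long i;

	src = kvcalloc(npages, sizeof(*src), GFP_KERNEL);
	dst = kvcalloc(npages, sizeof(*dst), GFP_KERNEL);
	if (!src || !dst)
		goto out;

	/* References, locks and unmaps every device page still in use. */
	migrate_device_range(src, start_pfn, npages);

	for (i = 0; i < npages; i++) {
		struct page *dpage;

		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;
		dpage = alloc_page(GFP_HIGHUSER);
		if (!dpage)
			continue;
		lock_page(dpage);
		/* Copy must be complete before migrate_device_finalize(). */
		my_copy_from_device(dpage, migrate_pfn_to_page(src[i]));
		dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_device_pages(src, dst, npages);
	migrate_device_finalize(src, dst, npages);
out:
	kvfree(src);
	kvfree(dst);
}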
+
/*
* Migrate a device coherent page back to normal memory. The caller should have
* a reference on page which will be copied to the new page if migration is
@@ -829,25 +938,19 @@ EXPORT_SYMBOL(migrate_vma_finalize);
int migrate_device_coherent_page(struct page *page)
{
unsigned long src_pfn, dst_pfn = 0;
- struct migrate_vma args;
struct page *dpage;
WARN_ON_ONCE(PageCompound(page));
lock_page(page);
src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
- args.src = &src_pfn;
- args.dst = &dst_pfn;
- args.cpages = 1;
- args.npages = 1;
- args.vma = NULL;
/*
* We don't have a VMA and don't need to walk the page tables to find
* the source page. So call migrate_vma_unmap() directly to unmap the
* page as migrate_vma_setup() will fail if args.vma == NULL.
*/
- migrate_vma_unmap(&args);
+ migrate_device_unmap(&src_pfn, 1, NULL);
if (!(src_pfn & MIGRATE_PFN_MIGRATE))
return -EBUSY;
@@ -857,10 +960,10 @@ int migrate_device_coherent_page(struct page *page)
dst_pfn = migrate_pfn(page_to_pfn(dpage));
}
- migrate_vma_pages(&args);
+ migrate_device_pages(&src_pfn, &dst_pfn, 1);
if (src_pfn & MIGRATE_PFN_MIGRATE)
copy_highpage(dpage, page);
- migrate_vma_finalize(&args);
+ migrate_device_finalize(&src_pfn, &dst_pfn, 1);
if (src_pfn & MIGRATE_PFN_MIGRATE)
return 0;
diff --git a/mm/mmap.c b/mm/mmap.c
index 6e447544f07d..bf2122af94e7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2673,7 +2673,7 @@ cannot_expand:
if (!arch_validate_flags(vma->vm_flags)) {
error = -EINVAL;
if (file)
- goto unmap_and_free_vma;
+ goto close_and_free_vma;
else
goto free_vma;
}
@@ -2742,6 +2742,9 @@ expanded:
validate_mm(mm);
return addr;
+close_and_free_vma:
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
unmap_and_free_vma:
fput(vma->vm_file);
vma->vm_file = NULL;
@@ -2942,17 +2945,18 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
if (vma &&
(!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
- mas->index = vma->vm_start;
- mas->last = addr + len - 1;
- vma_adjust_trans_huge(vma, addr, addr + len, 0);
+ mas_set_range(mas, vma->vm_start, addr + len - 1);
+ if (mas_preallocate(mas, vma, GFP_KERNEL))
+ return -ENOMEM;
+
+ vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
if (vma->anon_vma) {
anon_vma_lock_write(vma->anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
}
vma->vm_end = addr + len;
vma->vm_flags |= VM_SOFTDIRTY;
- if (mas_store_gfp(mas, vma, GFP_KERNEL))
- goto mas_expand_failed;
+ mas_store_prealloc(mas, vma);
if (vma->anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
@@ -2993,13 +2997,6 @@ mas_store_fail:
vma_alloc_fail:
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
-
-mas_expand_failed:
- if (vma->anon_vma) {
- anon_vma_interval_tree_post_update_vma(vma);
- anon_vma_unlock_write(vma->anon_vma);
- }
- return -ENOMEM;
}
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
@@ -3240,6 +3237,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
out_vma_link:
if (new_vma->vm_ops && new_vma->vm_ops->close)
new_vma->vm_ops->close(new_vma);
+
+ if (new_vma->vm_file)
+ fput(new_vma->vm_file);
+
+ unlink_anon_vmas(new_vma);
out_free_mempol:
mpol_put(vma_policy(new_vma));
out_free_vma:
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index a71924bd38c0..add4244e5790 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -1,6 +1,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
+#include <linux/kmsan-checks.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
@@ -265,6 +266,15 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
bool fullmm)
{
+ /*
+ * struct mmu_gather contains 7 1-bit fields packed into a 32-bit
+ * unsigned int value. The remaining 25 bits remain uninitialized
+ * and are never used, but KMSAN updates the origin for them in
+ * zap_pXX_range() in mm/memory.c, thus creating very long origin
+ * chains. This is technically correct, but consumes too much memory.
+ * Unpoisoning the whole structure will prevent creating such chains.
+ */
+ kmsan_unpoison_memory(tlb, sizeof(*tlb));
tlb->mm = mm;
tlb->fullmm = fullmm;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 461dcbd4f21a..668bfaa6ed2a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -267,6 +267,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
} else {
/* It must be a none pte, or what else?.. */
WARN_ON_ONCE(!pte_none(oldpte));
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
/*
* For file-backed mem, we need to be able to
@@ -278,6 +279,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
make_pte_marker(PTE_MARKER_UFFD_WP));
pages++;
}
+#endif
}
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ac2c9f12a7b2..e20ade858e71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3446,7 +3446,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
int pindex;
bool free_high;
- __count_vm_event(PGFREE);
+ __count_vm_events(PGFREE, 1 << order);
pindex = order_to_pindex(migratetype, order);
list_add(&page->pcp_list, &pcp->lists[pindex]);
pcp->count += 1 << order;
@@ -3803,7 +3803,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
pcp_spin_unlock_irqrestore(pcp, flags);
pcp_trylock_finish(UP_flags);
if (page) {
- __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone, 1);
}
return page;
@@ -6823,6 +6823,14 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
cond_resched();
}
+
+ /*
+ * ZONE_DEVICE pages are released directly to the driver page allocator
+ * which will set the page count to 1 when allocating the page.
+ */
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
+ pgmap->type == MEMORY_DEVICE_COHERENT)
+ set_page_count(page, 0);
}
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index 86214d48dd09..8280a5cb48df 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2332,7 +2332,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, struct inode *dir,
inode_init_owner(&init_user_ns, inode, dir, mode);
inode->i_blocks = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- inode->i_generation = prandom_u32();
+ inode->i_generation = get_random_u32();
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
diff --git a/mm/slab.c b/mm/slab.c
index a5486ff8362a..59c8e28f7b6a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1619,7 +1619,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
* although actual page can be freed in rcu context
*/
if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->freelist_cache, freelist);
+ kfree(freelist);
}
/*
@@ -1671,21 +1671,27 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
if (flags & CFLGS_OFF_SLAB) {
struct kmem_cache *freelist_cache;
size_t freelist_size;
+ size_t freelist_cache_size;
freelist_size = num * sizeof(freelist_idx_t);
- freelist_cache = kmalloc_slab(freelist_size, 0u);
- if (!freelist_cache)
- continue;
-
- /*
- * Needed to avoid possible looping condition
- * in cache_grow_begin()
- */
- if (OFF_SLAB(freelist_cache))
- continue;
+ if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
+ freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
+ } else {
+ freelist_cache = kmalloc_slab(freelist_size, 0u);
+ if (!freelist_cache)
+ continue;
+ freelist_cache_size = freelist_cache->size;
+
+ /*
+ * Needed to avoid possible looping condition
+ * in cache_grow_begin()
+ */
+ if (OFF_SLAB(freelist_cache))
+ continue;
+ }
/* check if off slab has enough benefit */
- if (freelist_cache->size > cachep->size / 2)
+ if (freelist_cache_size > cachep->size / 2)
continue;
}
@@ -2061,11 +2067,6 @@ done:
cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
- if (OFF_SLAB(cachep)) {
- cachep->freelist_cache =
- kmalloc_slab(cachep->freelist_size, 0u);
- }
-
err = setup_cpu_cache(cachep, gfp);
if (err) {
__kmem_cache_release(cachep);
@@ -2292,7 +2293,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
freelist = NULL;
else if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
- freelist = kmem_cache_alloc_node(cachep->freelist_cache,
+ freelist = kmalloc_node(cachep->freelist_size,
local_flags, nodeid);
} else {
/* We will use last bytes at the slab for freelist */
@@ -2380,7 +2381,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
unsigned int rand;
/* Use best entropy available to define a random shift */
- rand = get_random_int();
+ rand = get_random_u32();
/* Use a random state if the pre-computed list is not available */
if (!cachep->random_seq) {
diff --git a/mm/slub.c b/mm/slub.c
index 96dd392d7f99..157527d7101b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
return false;
freelist_count = oo_objects(s->oo);
- pos = get_random_int() % freelist_count;
+ pos = prandom_u32_max(freelist_count);
page_limit = slab->objects * s->size;
start = fixup_red_left(s, slab_address(slab));
diff --git a/net/802/garp.c b/net/802/garp.c
index f6012f8e59f0..fc9eb02a912f 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -407,7 +407,7 @@ static void garp_join_timer_arm(struct garp_applicant *app)
{
unsigned long delay;
- delay = (u64)msecs_to_jiffies(garp_join_time) * prandom_u32() >> 32;
+ delay = prandom_u32_max(msecs_to_jiffies(garp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
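This hunk and the mrp one below replace the open-coded multiply-shift (u64)msecs_to_jiffies(x) * prandom_u32() >> 32 with prandom_u32_max(), and the many prandom_u32() % n sites further down get the same helper. For reference, the helper is defined in include/linux/prandom.h at roughly this point in the tree as:

/* Reference definition, approximately as in include/linux/prandom.h. */
static inline u32 prandom_u32_max(u32 ep_ro)
{
	/* Scale a full 32-bit random value into [0, ep_ro). */
	return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
}

So the garp/mrp conversions are arithmetically identical to what they replace, while the modulo conversions elsewhere swap a division for a cheaper multiply-and-shift.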
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 35e04cc5390c..155f74d8b14f 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -592,7 +592,7 @@ static void mrp_join_timer_arm(struct mrp_applicant *app)
{
unsigned long delay;
- delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
+ delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time));
mod_timer(&app->join_timer, jiffies + delay);
}
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 829db9eba0cb..aaf64b953915 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
if (!page)
return -ENOMEM;
- for (p = page, len = 0; len < nbytes; p++, len++) {
+ for (p = page, len = 0; len < nbytes; p++) {
if (get_user(*p, buff++)) {
free_page((unsigned long)page);
return -EFAULT;
}
+ len += 1;
if (*p == '\0' || *p == '\n')
break;
}
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 6a6898ee4049..db60217f911b 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -222,7 +222,7 @@ static void pick_new_mon(struct ceph_mon_client *monc)
max--;
}
- n = prandom_u32() % max;
+ n = prandom_u32_max(max);
if (o >= 0 && n >= o)
n++;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 87b883c7bfd6..4e4f1e4bc265 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1479,7 +1479,7 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
static int pick_random_replica(const struct ceph_osds *acting)
{
- int i = prandom_u32() % acting->size;
+ int i = prandom_u32_max(acting->size);
dout("%s picked osd%d, primary osd%d\n", __func__,
acting->osds[i], acting->primary);
diff --git a/net/core/dev.c b/net/core/dev.c
index fa53830d0683..3be256051e99 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5136,11 +5136,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
case TC_ACT_SHOT:
mini_qdisc_qstats_cpu_drop(miniq);
kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
+ *ret = NET_RX_DROP;
return NULL;
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
consume_skb(skb);
+ *ret = NET_RX_SUCCESS;
return NULL;
case TC_ACT_REDIRECT:
/* skb_mac_header check was done by cls/act_bpf, so
@@ -5153,8 +5155,10 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
*another = true;
break;
}
+ *ret = NET_RX_SUCCESS;
return NULL;
case TC_ACT_CONSUMED:
+ *ret = NET_RX_SUCCESS;
return NULL;
default:
break;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e93edb810103..3c4786b99907 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -111,7 +111,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
unsigned long neigh_rand_reach_time(unsigned long base)
{
- return base ? (prandom_u32() % base) + (base >> 1) : 0;
+ return base ? prandom_u32_max(base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 88906ba6d9a7..c3763056c554 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2324,7 +2324,7 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
pkt_dev->curfl = 0; /*reset */
}
} else {
- flow = prandom_u32() % pkt_dev->cflows;
+ flow = prandom_u32_max(pkt_dev->cflows);
pkt_dev->curfl = flow;
if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
@@ -2380,10 +2380,9 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
__u16 t;
if (pkt_dev->flags & F_QUEUE_MAP_RND) {
- t = prandom_u32() %
- (pkt_dev->queue_map_max -
- pkt_dev->queue_map_min + 1)
- + pkt_dev->queue_map_min;
+ t = prandom_u32_max(pkt_dev->queue_map_max -
+ pkt_dev->queue_map_min + 1) +
+ pkt_dev->queue_map_min;
} else {
t = pkt_dev->cur_queue_map + 1;
if (t > pkt_dev->queue_map_max)
@@ -2412,7 +2411,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACSRC_RND)
- mc = prandom_u32() % pkt_dev->src_mac_count;
+ mc = prandom_u32_max(pkt_dev->src_mac_count);
else {
mc = pkt_dev->cur_src_mac_offset++;
if (pkt_dev->cur_src_mac_offset >=
@@ -2438,7 +2437,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
__u32 tmp;
if (pkt_dev->flags & F_MACDST_RND)
- mc = prandom_u32() % pkt_dev->dst_mac_count;
+ mc = prandom_u32_max(pkt_dev->dst_mac_count);
else {
mc = pkt_dev->cur_dst_mac_offset++;
@@ -2465,23 +2464,23 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
for (i = 0; i < pkt_dev->nr_labels; i++)
if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
- ((__force __be32)prandom_u32() &
+ ((__force __be32)get_random_u32() &
htonl(0x000fffff));
}
if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
- pkt_dev->vlan_id = prandom_u32() & (4096 - 1);
+ pkt_dev->vlan_id = prandom_u32_max(4096);
}
if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
- pkt_dev->svlan_id = prandom_u32() & (4096 - 1);
+ pkt_dev->svlan_id = prandom_u32_max(4096);
}
if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
if (pkt_dev->flags & F_UDPSRC_RND)
- pkt_dev->cur_udp_src = prandom_u32() %
- (pkt_dev->udp_src_max - pkt_dev->udp_src_min)
- + pkt_dev->udp_src_min;
+ pkt_dev->cur_udp_src = prandom_u32_max(
+ pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
+ pkt_dev->udp_src_min;
else {
pkt_dev->cur_udp_src++;
@@ -2492,9 +2491,9 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
if (pkt_dev->flags & F_UDPDST_RND) {
- pkt_dev->cur_udp_dst = prandom_u32() %
- (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
- + pkt_dev->udp_dst_min;
+ pkt_dev->cur_udp_dst = prandom_u32_max(
+ pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
+ pkt_dev->udp_dst_min;
} else {
pkt_dev->cur_udp_dst++;
if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
@@ -2509,7 +2508,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (imn < imx) {
__u32 t;
if (pkt_dev->flags & F_IPSRC_RND)
- t = prandom_u32() % (imx - imn) + imn;
+ t = prandom_u32_max(imx - imn) + imn;
else {
t = ntohl(pkt_dev->cur_saddr);
t++;
@@ -2531,8 +2530,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->flags & F_IPDST_RND) {
do {
- t = prandom_u32() %
- (imx - imn) + imn;
+ t = prandom_u32_max(imx - imn) +
+ imn;
s = htonl(t);
} while (ipv4_is_loopback(s) ||
ipv4_is_multicast(s) ||
@@ -2569,7 +2568,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
for (i = 0; i < 4; i++) {
pkt_dev->cur_in6_daddr.s6_addr32[i] =
- (((__force __be32)prandom_u32() |
+ (((__force __be32)get_random_u32() |
pkt_dev->min_in6_daddr.s6_addr32[i]) &
pkt_dev->max_in6_daddr.s6_addr32[i]);
}
@@ -2579,9 +2578,9 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
__u32 t;
if (pkt_dev->flags & F_TXSIZE_RND) {
- t = prandom_u32() %
- (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
- + pkt_dev->min_pkt_size;
+ t = prandom_u32_max(pkt_dev->max_pkt_size -
+ pkt_dev->min_pkt_size) +
+ pkt_dev->min_pkt_size;
} else {
t = pkt_dev->cur_pkt_size + 1;
if (t > pkt_dev->max_pkt_size)
@@ -2590,7 +2589,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
pkt_dev->cur_pkt_size = t;
} else if (pkt_dev->n_imix_entries > 0) {
struct imix_pkt *entry;
- __u32 t = prandom_u32() % IMIX_PRECISION;
+ __u32 t = prandom_u32_max(IMIX_PRECISION);
__u8 entry_index = pkt_dev->imix_distribution[t];
entry = &pkt_dev->imix_entries[entry_index];
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index ca70525621c7..1efdc47a999b 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -500,11 +500,11 @@ bool sk_msg_is_readable(struct sock *sk)
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);
-static struct sk_msg *alloc_sk_msg(void)
+static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
struct sk_msg *msg;
- msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
if (unlikely(!msg))
return NULL;
sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
@@ -520,7 +520,7 @@ static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
if (!sk_rmem_schedule(sk, skb, skb->truesize))
return NULL;
- return alloc_sk_msg();
+ return alloc_sk_msg(GFP_KERNEL);
}
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
@@ -597,7 +597,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len)
{
- struct sk_msg *msg = alloc_sk_msg();
+ struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
struct sock *sk = psock->sk;
int err;
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 5daa1fa54249..fb90e1e00773 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -21,6 +21,22 @@ static DEFINE_IDA(reuseport_ida);
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
struct sock_reuseport *reuse, bool bind_inany);
+void reuseport_has_conns_set(struct sock *sk)
+{
+ struct sock_reuseport *reuse;
+
+ if (!rcu_access_pointer(sk->sk_reuseport_cb))
+ return;
+
+ spin_lock_bh(&reuseport_lock);
+ reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+ if (likely(reuse))
+ reuse->has_conns = 1;
+ spin_unlock_bh(&reuseport_lock);
+}
+EXPORT_SYMBOL(reuseport_has_conns_set);
+
static int reuseport_sock_index(struct sock *sk,
const struct sock_reuseport *reuse,
bool closed)
diff --git a/net/core/stream.c b/net/core/stream.c
index 1105057ce00a..75fded8495f5 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -123,7 +123,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
DEFINE_WAIT_FUNC(wait, woken_wake_function);
if (sk_stream_memory_free(sk))
- current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ current_timeo = vm_wait = prandom_u32_max(HZ / 5) + 2;
add_wait_queue(sk_sleep(sk), &wait);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6a6e121dc00c..713b7b8dad7e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -144,7 +144,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_daddr,
inet->inet_sport,
inet->inet_dport);
- inet->inet_id = prandom_u32();
+ inet->inet_id = get_random_u16();
err = dccp_connect(sk);
rt = NULL;
@@ -443,7 +443,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
- newinet->inet_id = prandom_u32();
+ newinet->inet_id = get_random_u16();
if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
goto put_and_exit;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 1a59918d3b30..a9fde48cffd4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -3145,7 +3145,7 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
case NETDEV_CHANGELOWERSTATE: {
struct netdev_notifier_changelowerstate_info *info = ptr;
struct dsa_port *dp;
- int err;
+ int err = 0;
if (dsa_slave_dev_check(dev)) {
dp = dsa_slave_to_port(dev);
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 5bf357734b11..a50429a62f74 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -150,15 +150,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
struct hsr_port *port)
{
if (!frame->skb_std) {
- if (frame->skb_hsr) {
+ if (frame->skb_hsr)
frame->skb_std =
create_stripped_skb_hsr(frame->skb_hsr, frame);
- } else {
- /* Unexpected */
- WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
- __FILE__, __LINE__, port->dev->name);
+ else
+ netdev_warn_once(port->dev,
+ "Unexpected frame received in hsr_get_untagged_frame()\n");
+
+ if (!frame->skb_std)
return NULL;
- }
}
return skb_clone(frame->skb_std, GFP_ATOMIC);
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 405a8c2aea64..4d1af0cd7d99 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -70,10 +70,10 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
}
inet->inet_daddr = fl4->daddr;
inet->inet_dport = usin->sin_port;
- reuseport_has_conns(sk, true);
+ reuseport_has_conns_set(sk);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
- inet->inet_id = prandom_u32();
+ inet->inet_id = get_random_u16();
sk_dst_set(sk, &rt->dst);
err = 0;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index df0660d818ac..81be3e0f0e70 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -213,7 +213,7 @@ static void igmp_stop_timer(struct ip_mc_list *im)
/* It must be called with locked im->lock */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
- int tv = prandom_u32() % max_delay;
+ int tv = prandom_u32_max(max_delay);
im->tm_running = 1;
if (!mod_timer(&im->timer, jiffies+tv+2))
@@ -222,7 +222,7 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
static void igmp_gq_start_timer(struct in_device *in_dev)
{
- int tv = prandom_u32() % in_dev->mr_maxdelay;
+ int tv = prandom_u32_max(in_dev->mr_maxdelay);
unsigned long exp = jiffies + tv + 2;
if (in_dev->mr_gq_running &&
@@ -236,7 +236,7 @@ static void igmp_gq_start_timer(struct in_device *in_dev)
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
- int tv = prandom_u32() % delay;
+ int tv = prandom_u32_max(delay);
if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
in_dev_hold(in_dev);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ebca860e113f..4e84ed21d16f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -314,7 +314,7 @@ other_half_scan:
if (likely(remaining > 1))
remaining &= ~1U;
- offset = prandom_u32() % remaining;
+ offset = prandom_u32_max(remaining);
/* __inet_hash_connect() favors ports having @low parity
* We do the opposite to not pollute connect() users.
*/
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index a0ad34e4f044..d3dc28156622 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -1037,7 +1037,7 @@ ok:
* on low contention the randomness is maximal and on high contention
* it may be inexistent.
*/
- i = max_t(int, i, (prandom_u32() & 7) * 2);
+ i = max_t(int, i, prandom_u32_max(8) * 2);
WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1ae83ad629b2..922c87ef1ab5 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -172,7 +172,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
* Avoid using the hashed IP ident generator.
*/
if (sk->sk_protocol == IPPROTO_TCP)
- iph->id = (__force __be16)prandom_u32();
+ iph->id = (__force __be16)get_random_u16();
else
__ip_select_ident(net, iph, 1);
}
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index ff85db52b2e5..ded5bef02f77 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par));
+ flow.flowi4_uid = sock_net_uid(xt_net(par), NULL);
return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
}
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index e886147eed11..fc65d69f23e1 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -65,6 +65,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
struct flowi4 fl4 = {
.flowi4_scope = RT_SCOPE_UNIVERSE,
.flowi4_iif = LOOPBACK_IFINDEX,
+ .flowi4_uid = sock_net_uid(nft_net(pkt), NULL),
};
const struct net_device *oif;
const struct net_device *found;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 795cbe1de912..cd1fa9f70f1a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3664,7 +3664,7 @@ static __net_init int rt_genid_init(struct net *net)
{
atomic_set(&net->ipv4.rt_genid, 0);
atomic_set(&net->fnhe_genid, 0);
- atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
+ atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
return 0;
}
@@ -3719,7 +3719,7 @@ int __init ip_rt_init(void)
ip_idents = idents_hash;
- prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
+ get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 112f28f93693..ba4d98e510e0 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -243,7 +243,7 @@ static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
struct cdg *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- if (prandom_u32() <= nexp_u32(grad * backoff_factor))
+ if (get_random_u32() <= nexp_u32(grad * backoff_factor))
return false;
if (use_ineff) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6376ad915765..7a250ef9d1b7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -323,7 +323,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_daddr);
}
- inet->inet_id = prandom_u32();
+ inet->inet_id = get_random_u16();
if (tcp_fastopen_defer_connect(sk, &err))
return err;
@@ -1543,7 +1543,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
- newinet->inet_id = prandom_u32();
+ newinet->inet_id = get_random_u16();
/* Set ToS of the new socket based upon the value of incoming SYN.
* ECT bits are set later in tcp_init_transfer().
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8126f67d18b3..c83e5271030b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -246,7 +246,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rand = prandom_u32();
+ rand = get_random_u32();
first = reciprocal_scale(rand, remaining) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
@@ -448,7 +448,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
result = lookup_reuseport(net, sk, skb,
saddr, sport, daddr, hnum);
/* Fall back to scoring if group has connections */
- if (result && !reuseport_has_conns(sk, false))
+ if (result && !reuseport_has_conns(sk))
return result;
result = result ? : sk;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 10ce86bf228e..9c3f5202a97b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -104,7 +104,7 @@ static inline u32 cstamp_delta(unsigned long cstamp)
static inline s32 rfc3315_s14_backoff_init(s32 irt)
{
/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
- u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
+ u64 tmp = (900000 + prandom_u32_max(200001)) * (u64)irt;
do_div(tmp, 1000000);
return (s32)tmp;
}
@@ -112,11 +112,11 @@ static inline s32 rfc3315_s14_backoff_init(s32 irt)
static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
{
/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
- u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
+ u64 tmp = (1900000 + prandom_u32_max(200001)) * (u64)rt;
do_div(tmp, 1000000);
if ((s32)tmp > mrt) {
/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
- tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
+ tmp = (900000 + prandom_u32_max(200001)) * (u64)mrt;
do_div(tmp, 1000000);
}
return (s32)tmp;
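Worked example for the fixed-point backoff arithmetic above: with HZ = 1000 and irt = 1000 jiffies (one second), prandom_u32_max(200001) yields a value in 0..200000, so tmp spans 900000 * 1000 .. 1100000 * 1000; after do_div(tmp, 1000000) the result is 900..1100 jiffies, i.e. the 0.9 .. 1.1 multiplier that RFC 3315 section 14 calls for.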
@@ -3967,7 +3967,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
if (ifp->flags & IFA_F_OPTIMISTIC)
rand_num = 0;
else
- rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
+ rand_num = prandom_u32_max(idev->cnf.rtr_solicit_delay ?: 1);
nonce = 0;
if (idev->cnf.enhanced_dad ||
@@ -7214,9 +7214,11 @@ err_reg_dflt:
__addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
err_reg_all:
kfree(dflt);
+ net->ipv6.devconf_dflt = NULL;
#endif
err_alloc_dflt:
kfree(all);
+ net->ipv6.devconf_all = NULL;
err_alloc_all:
kfree(net->ipv6.inet6_addr_lst);
err_alloc_addr:
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index df665d4e8f0f..5ecb56522f9d 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -256,7 +256,7 @@ ipv4_connected:
goto out;
}
- reuseport_has_conns(sk, true);
+ reuseport_has_conns_set(sk);
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
out:
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index ceb85c67ce39..18481eb76a0a 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -220,7 +220,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
spin_lock_bh(&ip6_fl_lock);
if (label == 0) {
for (;;) {
- fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
+ fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK;
if (fl->label) {
lfl = __fl_lookup(net, fl->label);
if (!lfl)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 0566ab03ddbe..7860383295d8 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1050,7 +1050,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
/* called with mc_lock */
static void mld_gq_start_work(struct inet6_dev *idev)
{
- unsigned long tv = prandom_u32() % idev->mc_maxdelay;
+ unsigned long tv = prandom_u32_max(idev->mc_maxdelay);
idev->mc_gq_running = 1;
if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
@@ -1068,7 +1068,7 @@ static void mld_gq_stop_work(struct inet6_dev *idev)
/* called with mc_lock */
static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
{
- unsigned long tv = prandom_u32() % delay;
+ unsigned long tv = prandom_u32_max(delay);
if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
in6_dev_hold(idev);
@@ -1085,7 +1085,7 @@ static void mld_ifc_stop_work(struct inet6_dev *idev)
/* called with mc_lock */
static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
{
- unsigned long tv = prandom_u32() % delay;
+ unsigned long tv = prandom_u32_max(delay);
if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
in6_dev_hold(idev);
@@ -1130,7 +1130,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
}
if (delay >= resptime)
- delay = prandom_u32() % resptime;
+ delay = prandom_u32_max(resptime);
if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
refcount_inc(&ma->mca_refcnt);
@@ -2574,7 +2574,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
- delay = prandom_u32() % unsolicited_report_interval(ma->idev);
+ delay = prandom_u32_max(unsolicited_report_interval(ma->idev));
if (cancel_delayed_work(&ma->mca_work)) {
refcount_dec(&ma->mca_refcnt);
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 69d86b040a6a..a01d9b842bd0 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -40,6 +40,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
.flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev),
.flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
.flowi6_proto = iph->nexthdr,
+ .flowi6_uid = sock_net_uid(net, NULL),
.daddr = iph->saddr,
};
int lookup_flags;
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 91faac610e03..36dc14b34388 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -66,6 +66,7 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
struct flowi6 fl6 = {
.flowi6_iif = LOOPBACK_IFINDEX,
.flowi6_proto = pkt->tprot,
+ .flowi6_uid = sock_net_uid(nft_net(pkt), NULL),
};
u32 ret = 0;
@@ -163,6 +164,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
struct flowi6 fl6 = {
.flowi6_iif = LOOPBACK_IFINDEX,
.flowi6_proto = pkt->tprot,
+ .flowi6_uid = sock_net_uid(nft_net(pkt), NULL),
};
struct rt6_info *rt;
int lookup_flags;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 2880dc7d9a49..2685c3f15e9d 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -18,7 +18,7 @@ static u32 __ipv6_select_ident(struct net *net,
u32 id;
do {
- id = prandom_u32();
+ id = get_random_u32();
} while (!id);
return id;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8d09f0ea5b8c..129ec5a9b0eb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -195,7 +195,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
result = lookup_reuseport(net, sk, skb,
saddr, sport, daddr, hnum);
/* Fall back to scoring if group has connections */
- if (result && !reuseport_has_conns(sk, false))
+ if (result && !reuseport_has_conns(sk))
return result;
result = result ? : sk;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7f3f5f51081d..3d91b98db099 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -2036,7 +2036,7 @@ static void __init init_sample_table(void)
memset(sample_table, 0xff, sizeof(sample_table));
for (col = 0; col < SAMPLE_COLUMNS; col++) {
- prandom_bytes(rnd, sizeof(rnd));
+ get_random_bytes(rnd, sizeof(rnd));
for (i = 0; i < MCS_GROUP_RATES; i++) {
new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
while (sample_table[col][new_idx] != 0xff)
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 0e8c4f48c36d..dc3cdee51e66 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -641,7 +641,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) {
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u16 sn = get_random_u32();
+ u16 sn = get_random_u16();
info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO;
hdr->seq_ctrl =
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index fb67f1ca2495..8c04bb57dd6f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1308,7 +1308,7 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
* Randomly scan 1/32 of the whole table every second
*/
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
- unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask;
+ unsigned int hash = get_random_u32() & ip_vs_conn_tab_mask;
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->ipvs != ipvs)
diff --git a/net/netfilter/ipvs/ip_vs_twos.c b/net/netfilter/ipvs/ip_vs_twos.c
index acb55d8393ef..f2579fc9c75b 100644
--- a/net/netfilter/ipvs/ip_vs_twos.c
+++ b/net/netfilter/ipvs/ip_vs_twos.c
@@ -71,8 +71,8 @@ static struct ip_vs_dest *ip_vs_twos_schedule(struct ip_vs_service *svc,
* from 0 to total_weight
*/
total_weight += 1;
- rweight1 = prandom_u32() % total_weight;
- rweight2 = prandom_u32() % total_weight;
+ rweight1 = prandom_u32_max(total_weight);
+ rweight2 = prandom_u32_max(total_weight);
/* Pick two weighted servers */
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d8e6380f6337..18319a6e6806 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -468,7 +468,7 @@ find_free_id:
if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
else
- off = prandom_u32();
+ off = get_random_u16();
attempts = range_size;
if (attempts > max_attempts)
@@ -490,7 +490,7 @@ another_round:
if (attempts >= range_size || attempts < 16)
return;
attempts /= 2;
- off = prandom_u32();
+ off = get_random_u16();
goto another_round;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a0653a8dfa82..58d9cbc9ccdc 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5865,8 +5865,9 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set,
(NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
if (flags & NFT_SET_ELEM_INTERVAL_END)
return false;
- if (!nla[NFTA_SET_ELEM_KEY_END] &&
- !(flags & NFT_SET_ELEM_CATCHALL))
+
+ if (nla[NFTA_SET_ELEM_KEY_END] &&
+ flags & NFT_SET_ELEM_CATCHALL)
return false;
} else {
if (nla[NFTA_SET_ELEM_KEY_END])
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 203e24ae472c..b26c1dcfc27b 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -34,7 +34,7 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
switch (info->mode) {
case XT_STATISTIC_MODE_RANDOM:
- if ((prandom_u32() & 0x7FFFFFFF) < info->u.random.probability)
+ if ((get_random_u32() & 0x7FFFFFFF) < info->u.random.probability)
ret = !ret;
break;
case XT_STATISTIC_MODE_NTH:
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 868db4669a29..ca3ebfdb3023 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1033,7 +1033,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
actions = nla_next(sample_arg, &rem);
if ((arg->probability != U32_MAX) &&
- (!arg->probability || prandom_u32() > arg->probability)) {
+ (!arg->probability || get_random_u32() > arg->probability)) {
if (last)
consume_skb(skb);
return 0;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d3f6db350de7..6ce8dd19f33c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1350,7 +1350,7 @@ static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
if (READ_ONCE(history[i]) == rxhash)
count++;
- victim = prandom_u32() % ROLLOVER_HLEN;
+ victim = prandom_u32_max(ROLLOVER_HLEN);
/* Avoid dirtying the cache line if possible */
if (READ_ONCE(history[victim]) != rxhash)
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 5b5fb4ca8d3e..97a29172a8ee 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -104,7 +104,7 @@ static int rds_add_bound(struct rds_sock *rs, const struct in6_addr *addr,
return -EINVAL;
last = rover;
} else {
- rover = max_t(u16, prandom_u32(), 2);
+ rover = max_t(u16, get_random_u16(), 2);
last = rover - 1;
}
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index abe1bcc5c797..62d682b96b88 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -25,7 +25,7 @@ static struct tc_action_ops act_gact_ops;
static int gact_net_rand(struct tcf_gact *gact)
{
smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
- if (prandom_u32() % gact->tcfg_pval)
+ if (prandom_u32_max(gact->tcfg_pval))
return gact->tcf_action;
return gact->tcfg_paction;
}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 5ba36f70e3a1..7a25477f5d99 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -168,7 +168,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
psample_group = rcu_dereference_bh(s->psample_group);
/* randomly sample packets according to rate */
- if (psample_group && (prandom_u32() % s->rate == 0)) {
+ if (psample_group && (prandom_u32_max(s->rate) == 0)) {
if (!skb_at_tc_ingress(skb)) {
md.in_ifindex = skb->skb_iif;
md.out_ifindex = skb->dev->ifindex;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c98af0ada706..4a27dfb1ba0f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1099,12 +1099,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
skip:
if (!ingress) {
- notify_and_destroy(net, skb, n, classid,
- rtnl_dereference(dev->qdisc), new);
+ old = rtnl_dereference(dev->qdisc);
if (new && !new->ops->attach)
qdisc_refcount_inc(new);
rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
+ notify_and_destroy(net, skb, n, classid, old, new);
+
if (new && new->ops->attach)
new->ops->attach(new);
} else {
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 55c6879d2c7e..3ed0c3342189 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -573,7 +573,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
/* Simple BLUE implementation. Lack of ECN is deliberate. */
if (vars->p_drop)
- drop |= (prandom_u32() < vars->p_drop);
+ drop |= (get_random_u32() < vars->p_drop);
/* Overload the drop_next field as an activity timeout */
if (!vars->count)
@@ -2092,11 +2092,11 @@ retry:
WARN_ON(host_load > CAKE_QUEUES);
- /* The shifted prandom_u32() is a way to apply dithering to
- * avoid accumulating roundoff errors
+ /* The get_random_u16() is a way to apply dithering to avoid
+ * accumulating roundoff errors
*/
flow->deficit += (b->flow_quantum * quantum_div[host_load] +
- (prandom_u32() >> 16)) >> 16;
+ get_random_u16()) >> 16;
list_move_tail(&flow->flowchain, &b->old_flows);
goto retry;
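The deficit update above is 16.16 fixed-point arithmetic: quantum_div[host_load] is approximately 65535 / host_load, so the product is the per-host share of the flow quantum scaled by 2^16. Adding a uniform 16-bit random value before the final >> 16 turns plain truncation into stochastic rounding (the result rounds up with probability equal to the discarded fraction), so per-packet rounding errors average out instead of accumulating; the replaced prandom_u32() >> 16 supplied the same 16-bit dither.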
@@ -2224,8 +2224,12 @@ retry:
static void cake_reset(struct Qdisc *sch)
{
+ struct cake_sched_data *q = qdisc_priv(sch);
u32 c;
+ if (!q->tins)
+ return;
+
for (c = 0; c < CAKE_MAX_TINS; c++)
cake_clear_tin(sch, c);
}
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 99d318b60568..8c4fee063436 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -478,24 +478,26 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
if (opt) {
err = fq_codel_change(sch, opt, extack);
if (err)
- return err;
+ goto init_failure;
}
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
- return err;
+ goto init_failure;
if (!q->flows) {
q->flows = kvcalloc(q->flows_cnt,
sizeof(struct fq_codel_flow),
GFP_KERNEL);
- if (!q->flows)
- return -ENOMEM;
-
+ if (!q->flows) {
+ err = -ENOMEM;
+ goto init_failure;
+ }
q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
- if (!q->backlogs)
- return -ENOMEM;
-
+ if (!q->backlogs) {
+ err = -ENOMEM;
+ goto alloc_failure;
+ }
for (i = 0; i < q->flows_cnt; i++) {
struct fq_codel_flow *flow = q->flows + i;
@@ -508,6 +510,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
+
+alloc_failure:
+ kvfree(q->flows);
+ q->flows = NULL;
+init_failure:
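+	/* Zero flows_cnt so the reset/destroy paths won't walk the freed arrays */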
+ q->flows_cnt = 0;
+ return err;
}
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 18f4273a835b..fb00ac40ecb7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -171,7 +171,7 @@ static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
static void init_crandom(struct crndstate *state, unsigned long rho)
{
state->rho = rho;
- state->last = prandom_u32();
+ state->last = get_random_u32();
}
/* get_crandom - correlated random number generator
@@ -184,9 +184,9 @@ static u32 get_crandom(struct crndstate *state)
unsigned long answer;
if (!state || state->rho == 0) /* no correlation */
- return prandom_u32();
+ return get_random_u32();
- value = prandom_u32();
+ value = get_random_u32();
rho = (u64)state->rho + 1;
answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
state->last = answer;
@@ -200,7 +200,7 @@ static u32 get_crandom(struct crndstate *state)
static bool loss_4state(struct netem_sched_data *q)
{
struct clgstate *clg = &q->clg;
- u32 rnd = prandom_u32();
+ u32 rnd = get_random_u32();
/*
* Makes a comparison between rnd and the transition
@@ -268,15 +268,15 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
switch (clg->state) {
case GOOD_STATE:
- if (prandom_u32() < clg->a1)
+ if (get_random_u32() < clg->a1)
clg->state = BAD_STATE;
- if (prandom_u32() < clg->a4)
+ if (get_random_u32() < clg->a4)
return true;
break;
case BAD_STATE:
- if (prandom_u32() < clg->a2)
+ if (get_random_u32() < clg->a2)
clg->state = GOOD_STATE;
- if (prandom_u32() > clg->a3)
+ if (get_random_u32() > clg->a3)
return true;
}
@@ -513,8 +513,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto finish_segs;
}
- skb->data[prandom_u32() % skb_headlen(skb)] ^=
- 1<<(prandom_u32() % 8);
+ skb->data[prandom_u32_max(skb_headlen(skb))] ^=
+ 1<<prandom_u32_max(8);
}
if (unlikely(sch->q.qlen >= sch->limit)) {
@@ -632,7 +632,7 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
if (!q->slot_dist)
next_delay = q->slot_config.min_delay +
- (prandom_u32() *
+ (get_random_u32() *
(q->slot_config.max_delay -
q->slot_config.min_delay) >> 32);
else
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 974038ba6c7b..265c238047a4 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -72,7 +72,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
if (vars->accu_prob >= (MAX_PROB / 2) * 17)
return true;
- prandom_bytes(&rnd, 8);
+ get_random_bytes(&rnd, 8);
if ((rnd >> BITS_PER_BYTE) < local_prob) {
vars->accu_prob = 0;
return true;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index e2389fa3cff8..1871a1c0224d 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -379,7 +379,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto enqueue;
}
- r = prandom_u32() & SFB_MAX_PROB;
+ r = get_random_u16() & SFB_MAX_PROB;
if (unlikely(r < p_min)) {
if (unlikely(p_min > SFB_MAX_PROB / 2)) {
@@ -455,7 +455,8 @@ static void sfb_reset(struct Qdisc *sch)
{
struct sfb_sched_data *q = qdisc_priv(sch);
- qdisc_reset(q->qdisc);
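+	/* q->qdisc may still be NULL if sfb_init() failed */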
+ if (likely(q->qdisc))
+ qdisc_reset(q->qdisc);
q->slot = 0;
q->double_buffering = false;
sfb_zero_all_buckets(q);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 171f1a35d205..83628c347744 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8319,7 +8319,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
- rover = prandom_u32() % remaining + low;
+ rover = prandom_u32_max(remaining) + low;
do {
rover++;
@@ -9448,7 +9448,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
newinet->inet_dport = htons(asoc->peer.port);
newinet->pmtudisc = inet->pmtudisc;
- newinet->inet_id = prandom_u32();
+ newinet->inet_id = get_random_u16();
newinet->uc_ttl = inet->uc_ttl;
newinet->mc_loop = 1;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index e6ee797640b4..c305d8dd23f8 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -896,7 +896,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
}
memcpy(lgr->pnet_id, ibdev->pnetid[ibport - 1],
SMC_MAX_PNETID_LEN);
- if (smc_wr_alloc_lgr_mem(lgr))
+ rc = smc_wr_alloc_lgr_mem(lgr);
+ if (rc)
goto free_wq;
smc_llc_lgr_init(lgr, smc);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 5f96e75f9eec..48337687848c 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -130,8 +130,8 @@ gss_krb5_make_confounder(char *p, u32 conflen)
/* initialize to random value */
if (i == 0) {
- i = prandom_u32();
- i = (i << 32) | prandom_u32();
+ i = get_random_u32();
+ i = (i << 32) | get_random_u32();
}
switch (conflen) {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c3c693b51c94..f075a9fb5ccc 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -677,7 +677,7 @@ static void cache_limit_defers(void)
/* Consider removing either the first or the last */
if (cache_defer_cnt > DFR_MAX) {
- if (prandom_u32() & 1)
+ if (prandom_u32_max(2))
discard = list_entry(cache_defer_list.next,
struct cache_deferred_req, recent);
else
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 71dc26373444..656cec208371 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1865,7 +1865,7 @@ xprt_alloc_xid(struct rpc_xprt *xprt)
static void
xprt_init_xid(struct rpc_xprt *xprt)
{
- xprt->xid = prandom_u32();
+ xprt->xid = get_random_u32();
}
static void
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index f34d5427b66c..915b9902f673 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1619,7 +1619,7 @@ static int xs_get_random_port(void)
if (max < min)
return -EADDRINUSE;
range = max - min + 1;
- rand = (unsigned short) prandom_u32() % range;
+ rand = prandom_u32_max(range);
return rand + min;
}
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index da69e1abf68f..e8630707901e 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -148,8 +148,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
{
struct net *net = d->net;
struct tipc_net *tn = tipc_net(net);
- bool trial = time_before(jiffies, tn->addr_trial_end);
u32 self = tipc_own_addr(net);
+ bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
if (mtyp == DSC_TRIAL_FAIL_MSG) {
if (!trial)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f1c3b8eb4b3d..e902b01ea3cb 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3010,7 +3010,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
struct net *net = sock_net(sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
- u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
+ u32 portid = prandom_u32_max(remaining) + TIPC_MIN_PORT;
while (remaining--) {
portid++;
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 5522865deae9..14fd05fd6107 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -568,7 +568,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
sub.seq.upper = upper;
sub.timeout = TIPC_WAIT_FOREVER;
sub.filter = filter;
- *(u32 *)&sub.usr_handle = port;
+ *(u64 *)&sub.usr_handle = (u64)port;
con = tipc_conn_alloc(tipc_topsrv(net));
if (IS_ERR(con))
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index 9b79e334dbd9..955ac3e0bf4d 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -273,7 +273,7 @@ static int tls_strp_read_copyin(struct tls_strparser *strp)
return desc.error;
}
-static int tls_strp_read_short(struct tls_strparser *strp)
+static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
{
struct skb_shared_info *shinfo;
struct page *page;
@@ -283,7 +283,7 @@ static int tls_strp_read_short(struct tls_strparser *strp)
* to read the data out. Otherwise the connection will stall.
* Without pressure, a threshold of INT_MAX will never be ready.
*/
- if (likely(!tcp_epollin_ready(strp->sk, INT_MAX)))
+ if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
return 0;
shinfo = skb_shinfo(strp->anchor);
@@ -315,6 +315,27 @@ static int tls_strp_read_short(struct tls_strparser *strp)
return 0;
}
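+/* Walk the anchor's frag_list and check that the queued TCP segments form
+ * one contiguous sequence, i.e. no segment duplicates or overlaps data of
+ * the previous one.
+ */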
+static bool tls_strp_check_no_dup(struct tls_strparser *strp)
+{
+ unsigned int len = strp->stm.offset + strp->stm.full_len;
+ struct sk_buff *skb;
+ u32 seq;
+
+ skb = skb_shinfo(strp->anchor)->frag_list;
+ seq = TCP_SKB_CB(skb)->seq;
+
+ while (skb->len < len) {
+ seq += skb->len;
+ len -= skb->len;
+ skb = skb->next;
+
+ if (TCP_SKB_CB(skb)->seq != seq)
+ return false;
+ }
+
+ return true;
+}
+
static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
struct tcp_sock *tp = tcp_sk(strp->sk);
@@ -373,7 +394,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
return tls_strp_read_copyin(strp);
if (inq < strp->stm.full_len)
- return tls_strp_read_short(strp);
+ return tls_strp_read_copy(strp, true);
if (!strp->stm.full_len) {
tls_strp_load_anchor_with_queue(strp, inq);
@@ -387,9 +408,12 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
strp->stm.full_len = sz;
if (!strp->stm.full_len || inq < strp->stm.full_len)
- return tls_strp_read_short(strp);
+ return tls_strp_read_copy(strp, true);
}
+ if (!tls_strp_check_no_dup(strp))
+ return tls_strp_read_copy(strp, false);
+
strp->msg_ready = 1;
tls_rx_msg_ready(strp);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 15dbb392c875..b3545fc68097 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1147,7 +1147,7 @@ static int unix_autobind(struct sock *sk)
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
- ordernum = prandom_u32();
+ ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
ordernum = (ordernum + 1) & 0xFFFFF;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index d45d5366115a..dc2763540393 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -204,6 +204,7 @@ void wait_for_unix_gc(void)
/* The external entry point: unix_gc() */
void unix_gc(void)
{
+ struct sk_buff *next_skb, *skb;
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
@@ -297,11 +298,30 @@ void unix_gc(void)
spin_unlock(&unix_gc_lock);
+	/* We need io_uring to clean its registered files, so ignore all
+	 * io_uring-originated skbs. This is fine because io_uring doesn't keep
+	 * references to other io_uring instances; killing all other files in
+	 * the cycle will put all io_uring references, forcing it through the
+	 * normal release path and eventually putting the registered files.
+	 */
+ skb_queue_walk_safe(&hitlist, skb, next_skb) {
+ if (skb->scm_io_uring) {
+ __skb_unlink(skb, &hitlist);
+ skb_queue_tail(&skb->sk->sk_receive_queue, skb);
+ }
+ }
+
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
+	/* There could be io_uring-registered files; just push them back to
+	 * the inflight list.
+	 */
+ list_for_each_entry_safe(u, next, &gc_candidates, link)
+ list_move_tail(&u->link, &gc_inflight_list);
+
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 81df34b3da6e..3d2fe7712ac5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2072,7 +2072,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
} else {
u32 spi = 0;
for (h = 0; h < high-low+1; h++) {
- spi = low + prandom_u32()%(high-low+1);
+ spi = low + prandom_u32_max(high - low + 1);
x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
if (x0 == NULL) {
newspi = htonl(spi);
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 22adbf89cb31..41f3602fc8de 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -140,7 +140,7 @@ $(obj)/%.symtypes : $(src)/%.c FORCE
# LLVM assembly
# Generate .ll files from .c
quiet_cmd_cc_ll_c = CC $(quiet_modtag) $@
- cmd_cc_ll_c = $(CC) $(c_flags) -emit-llvm -S -o $@ $<
+ cmd_cc_ll_c = $(CC) $(c_flags) -emit-llvm -S -fno-discard-value-names -o $@ $<
$(obj)/%.ll: $(src)/%.c FORCE
$(call if_changed_dep,cc_ll_c)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 7740ce3b29e8..8489a3402eb8 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -119,7 +119,7 @@ quiet_cmd_modpost = MODPOST $@
echo >&2 "WARNING: $(missing-input) is missing."; \
echo >&2 " Modules may not have dependencies or modversions."; \
echo >&2 " You may get many unresolved symbol warnings.";) \
- sed 's/ko$$/o/' $(or $(modorder-if-needed), /dev/null) | $(MODPOST) $(modpost-args) $(vmlinux.o-if-present) -T -
+ sed 's/ko$$/o/' $(or $(modorder-if-needed), /dev/null) | $(MODPOST) $(modpost-args) -T - $(vmlinux.o-if-present)
targets += $(output-symdump)
$(output-symdump): $(modorder-if-needed) $(vmlinux.o-if-present) $(moudle.symvers-if-present) $(MODPOST) FORCE
diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py
index bb78c9bde55c..56f2ec8f0f40 100755
--- a/scripts/clang-tools/run-clang-tools.py
+++ b/scripts/clang-tools/run-clang-tools.py
@@ -45,13 +45,14 @@ def init(l, a):
def run_analysis(entry):
# Disable all checks, then re-enable the ones we want
- checks = "-checks=-*,"
+ checks = []
+ checks.append("-checks=-*")
if args.type == "clang-tidy":
- checks += "linuxkernel-*"
+ checks.append("linuxkernel-*")
else:
- checks += "clang-analyzer-*"
- checks += ",-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling"
- p = subprocess.run(["clang-tidy", "-p", args.path, checks, entry["file"]],
+ checks.append("clang-analyzer-*")
+ checks.append("-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling")
+ p = subprocess.run(["clang-tidy", "-p", args.path, ",".join(checks), entry["file"]],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=entry["directory"])
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index c920c1b18e7a..70392fd2fd29 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -97,8 +97,6 @@ $M $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install
$MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
cp .config %{buildroot}/boot/config-$KERNELRELEASE
- bzip2 -9 --keep vmlinux
- mv vmlinux.bz2 %{buildroot}/boot/vmlinux-$KERNELRELEASE.bz2
$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/build
$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/source
$S$M mkdir -p %{buildroot}/usr/src/kernels/$KERNELRELEASE
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6963d5a487b3..d8edb6055072 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1899,10 +1899,8 @@ static int snd_rawmidi_free(struct snd_rawmidi *rmidi)
snd_info_free_entry(rmidi->proc_entry);
rmidi->proc_entry = NULL;
- mutex_lock(&register_mutex);
if (rmidi->ops && rmidi->ops->dev_unregister)
rmidi->ops->dev_unregister(rmidi);
- mutex_unlock(&register_mutex);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT]);
snd_rawmidi_free_substreams(&rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]);
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index 7ed0a2a91035..2751bf2ff61b 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -162,7 +162,6 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
mutex_unlock(&sound_oss_mutex);
return -ENOENT;
}
- unregister_sound_special(minor);
switch (SNDRV_MINOR_OSS_DEVICE(minor)) {
case SNDRV_MINOR_OSS_PCM:
track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_AUDIO);
@@ -174,12 +173,18 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1);
break;
}
- if (track2 >= 0) {
- unregister_sound_special(track2);
+ if (track2 >= 0)
snd_oss_minors[track2] = NULL;
- }
snd_oss_minors[minor] = NULL;
mutex_unlock(&sound_oss_mutex);
+
+	/* Call unregister_sound_special() outside sound_oss_mutex;
+	 * otherwise it may deadlock, as it can trigger the release of a card.
+	 */
+ unregister_sound_special(minor);
+ if (track2 >= 0)
+ unregister_sound_special(track2);
+
kfree(mptr);
return 0;
}
diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
index 3952f2853703..e5f0549bf06d 100644
--- a/sound/pci/hda/cs35l41_hda.c
+++ b/sound/pci/hda/cs35l41_hda.c
@@ -91,20 +91,18 @@ static const struct reg_sequence cs35l41_hda_mute[] = {
{ CS35L41_AMP_DIG_VOL_CTRL, 0x0000A678 }, // AMP_VOL_PCM Mute
};
-static int cs35l41_control_add(struct cs_dsp_coeff_ctl *cs_ctl)
+static void cs35l41_add_controls(struct cs35l41_hda *cs35l41)
{
- struct cs35l41_hda *cs35l41 = container_of(cs_ctl->dsp, struct cs35l41_hda, cs_dsp);
struct hda_cs_dsp_ctl_info info;
info.device_name = cs35l41->amp_name;
info.fw_type = cs35l41->firmware_type;
info.card = cs35l41->codec->card;
- return hda_cs_dsp_control_add(cs_ctl, &info);
+ hda_cs_dsp_add_controls(&cs35l41->cs_dsp, &info);
}
static const struct cs_dsp_client_ops client_ops = {
- .control_add = cs35l41_control_add,
.control_remove = hda_cs_dsp_control_remove,
};
@@ -435,6 +433,8 @@ static int cs35l41_init_dsp(struct cs35l41_hda *cs35l41)
if (ret)
goto err_release;
+ cs35l41_add_controls(cs35l41);
+
ret = cs35l41_save_calibration(cs35l41);
err_release:
@@ -461,9 +461,12 @@ static void cs35l41_remove_dsp(struct cs35l41_hda *cs35l41)
struct cs_dsp *dsp = &cs35l41->cs_dsp;
cancel_work_sync(&cs35l41->fw_load_work);
+
+ mutex_lock(&cs35l41->fw_mutex);
cs35l41_shutdown_dsp(cs35l41);
cs_dsp_remove(dsp);
cs35l41->halo_initialized = false;
+ mutex_unlock(&cs35l41->fw_mutex);
}
/* Protection release cycle to get the speaker out of Safe-Mode */
@@ -487,10 +490,10 @@ static void cs35l41_hda_playback_hook(struct device *dev, int action)
struct regmap *reg = cs35l41->regmap;
int ret = 0;
- mutex_lock(&cs35l41->fw_mutex);
-
switch (action) {
case HDA_GEN_PCM_ACT_OPEN:
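+		/* Hold a runtime PM reference for the duration of playback;
+		 * released in the ACT_CLOSE case below.
+		 */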
+ pm_runtime_get_sync(dev);
+ mutex_lock(&cs35l41->fw_mutex);
cs35l41->playback_started = true;
if (cs35l41->firmware_running) {
regmap_multi_reg_write(reg, cs35l41_hda_config_dsp,
@@ -508,15 +511,21 @@ static void cs35l41_hda_playback_hook(struct device *dev, int action)
CS35L41_AMP_EN_MASK, 1 << CS35L41_AMP_EN_SHIFT);
if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
regmap_write(reg, CS35L41_GPIO1_CTRL1, 0x00008001);
+ mutex_unlock(&cs35l41->fw_mutex);
break;
case HDA_GEN_PCM_ACT_PREPARE:
+ mutex_lock(&cs35l41->fw_mutex);
ret = cs35l41_global_enable(reg, cs35l41->hw_cfg.bst_type, 1);
+ mutex_unlock(&cs35l41->fw_mutex);
break;
case HDA_GEN_PCM_ACT_CLEANUP:
+ mutex_lock(&cs35l41->fw_mutex);
regmap_multi_reg_write(reg, cs35l41_hda_mute, ARRAY_SIZE(cs35l41_hda_mute));
ret = cs35l41_global_enable(reg, cs35l41->hw_cfg.bst_type, 0);
+ mutex_unlock(&cs35l41->fw_mutex);
break;
case HDA_GEN_PCM_ACT_CLOSE:
+ mutex_lock(&cs35l41->fw_mutex);
ret = regmap_update_bits(reg, CS35L41_PWR_CTRL2,
CS35L41_AMP_EN_MASK, 0 << CS35L41_AMP_EN_SHIFT);
if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
@@ -530,14 +539,16 @@ static void cs35l41_hda_playback_hook(struct device *dev, int action)
}
cs35l41_irq_release(cs35l41);
cs35l41->playback_started = false;
+ mutex_unlock(&cs35l41->fw_mutex);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
break;
default:
dev_warn(cs35l41->dev, "Playback action not supported: %d\n", action);
break;
}
- mutex_unlock(&cs35l41->fw_mutex);
-
if (ret)
dev_err(cs35l41->dev, "Regmap access fail: %d\n", ret);
}
@@ -562,45 +573,148 @@ static int cs35l41_hda_channel_map(struct device *dev, unsigned int tx_num, unsi
rx_slot);
}
+static void cs35l41_ready_for_reset(struct cs35l41_hda *cs35l41)
+{
+ mutex_lock(&cs35l41->fw_mutex);
+ if (cs35l41->firmware_running) {
+
+ regcache_cache_only(cs35l41->regmap, false);
+
+ cs35l41_exit_hibernate(cs35l41->dev, cs35l41->regmap);
+ cs35l41_shutdown_dsp(cs35l41);
+ cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type);
+
+ regcache_cache_only(cs35l41->regmap, true);
+ regcache_mark_dirty(cs35l41->regmap);
+ }
+ mutex_unlock(&cs35l41->fw_mutex);
+}
+
+static int cs35l41_system_suspend(struct device *dev)
+{
+ struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(cs35l41->dev, "System Suspend\n");
+
+ if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
+ dev_err(cs35l41->dev, "System Suspend not supported\n");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+
+ /* Shutdown DSP before system suspend */
+ cs35l41_ready_for_reset(cs35l41);
+
+ /*
+	 * The reset GPIO may be shared, so we cannot reset here.
+	 * However, beyond this point the amps may be powered down.
+ */
+ return 0;
+}
+
+static int cs35l41_system_resume(struct device *dev)
+{
+ struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(cs35l41->dev, "System Resume\n");
+
+ if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
+ dev_err(cs35l41->dev, "System Resume not supported\n");
+ return -EINVAL;
+ }
+
+ if (cs35l41->reset_gpio) {
+ usleep_range(2000, 2100);
+ gpiod_set_value_cansleep(cs35l41->reset_gpio, 1);
+ }
+
+ usleep_range(2000, 2100);
+
+ ret = pm_runtime_force_resume(dev);
+
+ mutex_lock(&cs35l41->fw_mutex);
+ if (!ret && cs35l41->request_fw_load && !cs35l41->fw_request_ongoing) {
+ cs35l41->fw_request_ongoing = true;
+ schedule_work(&cs35l41->fw_load_work);
+ }
+ mutex_unlock(&cs35l41->fw_mutex);
+
+ return ret;
+}
+
static int cs35l41_runtime_suspend(struct device *dev)
{
struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ int ret = 0;
- dev_dbg(cs35l41->dev, "Suspend\n");
+ dev_dbg(cs35l41->dev, "Runtime Suspend\n");
- if (!cs35l41->firmware_running)
+ if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
+ dev_dbg(cs35l41->dev, "Runtime Suspend not supported\n");
return 0;
+ }
- if (cs35l41_enter_hibernate(cs35l41->dev, cs35l41->regmap, cs35l41->hw_cfg.bst_type) < 0)
- return 0;
+ mutex_lock(&cs35l41->fw_mutex);
+
+ if (cs35l41->playback_started) {
+ regmap_multi_reg_write(cs35l41->regmap, cs35l41_hda_mute,
+ ARRAY_SIZE(cs35l41_hda_mute));
+ cs35l41_global_enable(cs35l41->regmap, cs35l41->hw_cfg.bst_type, 0);
+ regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+ CS35L41_AMP_EN_MASK, 0 << CS35L41_AMP_EN_SHIFT);
+ if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
+ regmap_write(cs35l41->regmap, CS35L41_GPIO1_CTRL1, 0x00000001);
+ regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+ CS35L41_VMON_EN_MASK | CS35L41_IMON_EN_MASK,
+ 0 << CS35L41_VMON_EN_SHIFT | 0 << CS35L41_IMON_EN_SHIFT);
+ cs35l41->playback_started = false;
+ }
+
+ if (cs35l41->firmware_running) {
+ ret = cs35l41_enter_hibernate(cs35l41->dev, cs35l41->regmap,
+ cs35l41->hw_cfg.bst_type);
+ if (ret)
+ goto err;
+ } else {
+ cs35l41_safe_reset(cs35l41->regmap, cs35l41->hw_cfg.bst_type);
+ }
regcache_cache_only(cs35l41->regmap, true);
regcache_mark_dirty(cs35l41->regmap);
- return 0;
+err:
+ mutex_unlock(&cs35l41->fw_mutex);
+
+ return ret;
}
static int cs35l41_runtime_resume(struct device *dev)
{
struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
- dev_dbg(cs35l41->dev, "Resume.\n");
+ dev_dbg(cs35l41->dev, "Runtime Resume\n");
if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
- dev_dbg(cs35l41->dev, "System does not support Resume\n");
+ dev_dbg(cs35l41->dev, "Runtime Resume not supported\n");
return 0;
}
- if (!cs35l41->firmware_running)
- return 0;
+ mutex_lock(&cs35l41->fw_mutex);
regcache_cache_only(cs35l41->regmap, false);
- ret = cs35l41_exit_hibernate(cs35l41->dev, cs35l41->regmap);
- if (ret) {
- regcache_cache_only(cs35l41->regmap, true);
- return ret;
+ if (cs35l41->firmware_running) {
+ ret = cs35l41_exit_hibernate(cs35l41->dev, cs35l41->regmap);
+ if (ret) {
+ dev_warn(cs35l41->dev, "Unable to exit Hibernate.");
+ goto err;
+ }
}
/* Test key needs to be unlocked to allow the OTP settings to re-apply */
@@ -609,26 +723,16 @@ static int cs35l41_runtime_resume(struct device *dev)
cs35l41_test_key_lock(cs35l41->dev, cs35l41->regmap);
if (ret) {
dev_err(cs35l41->dev, "Failed to restore register cache: %d\n", ret);
- return ret;
+ goto err;
}
if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST)
cs35l41_init_boost(cs35l41->dev, cs35l41->regmap, &cs35l41->hw_cfg);
- return 0;
-}
-
-static int cs35l41_hda_suspend_hook(struct device *dev)
-{
- dev_dbg(dev, "Request Suspend\n");
- pm_runtime_mark_last_busy(dev);
- return pm_runtime_put_autosuspend(dev);
-}
+err:
+ mutex_unlock(&cs35l41->fw_mutex);
-static int cs35l41_hda_resume_hook(struct device *dev)
-{
- dev_dbg(dev, "Request Resume\n");
- return pm_runtime_get_sync(dev);
+ return ret;
}
static int cs35l41_smart_amp(struct cs35l41_hda *cs35l41)
@@ -678,8 +782,6 @@ clean_dsp:
static void cs35l41_load_firmware(struct cs35l41_hda *cs35l41, bool load)
{
- pm_runtime_get_sync(cs35l41->dev);
-
if (cs35l41->firmware_running && !load) {
dev_dbg(cs35l41->dev, "Unloading Firmware\n");
cs35l41_shutdown_dsp(cs35l41);
@@ -689,9 +791,6 @@ static void cs35l41_load_firmware(struct cs35l41_hda *cs35l41, bool load)
} else {
dev_dbg(cs35l41->dev, "Unable to Load firmware.\n");
}
-
- pm_runtime_mark_last_busy(cs35l41->dev);
- pm_runtime_put_autosuspend(cs35l41->dev);
}
static int cs35l41_fw_load_ctl_get(struct snd_kcontrol *kcontrol,
@@ -707,16 +806,21 @@ static void cs35l41_fw_load_work(struct work_struct *work)
{
struct cs35l41_hda *cs35l41 = container_of(work, struct cs35l41_hda, fw_load_work);
+ pm_runtime_get_sync(cs35l41->dev);
+
mutex_lock(&cs35l41->fw_mutex);
/* Recheck if playback is ongoing; the mutex will block playback during firmware loading */
if (cs35l41->playback_started)
- dev_err(cs35l41->dev, "Cannot Load/Unload firmware during Playback\n");
+ dev_err(cs35l41->dev, "Cannot Load/Unload firmware during Playback. Retrying...\n");
else
cs35l41_load_firmware(cs35l41, cs35l41->request_fw_load);
cs35l41->fw_request_ongoing = false;
mutex_unlock(&cs35l41->fw_mutex);
+
+ pm_runtime_mark_last_busy(cs35l41->dev);
+ pm_runtime_put_autosuspend(cs35l41->dev);
}
static int cs35l41_fw_load_ctl_put(struct snd_kcontrol *kcontrol,
@@ -840,6 +944,8 @@ static int cs35l41_hda_bind(struct device *dev, struct device *master, void *mas
pm_runtime_get_sync(dev);
+ mutex_lock(&cs35l41->fw_mutex);
+
comps->dev = dev;
if (!cs35l41->acpi_subsystem_id)
cs35l41->acpi_subsystem_id = kasprintf(GFP_KERNEL, "%.8x",
@@ -852,10 +958,8 @@ static int cs35l41_hda_bind(struct device *dev, struct device *master, void *mas
if (firmware_autostart) {
dev_dbg(cs35l41->dev, "Firmware Autostart.\n");
cs35l41->request_fw_load = true;
- mutex_lock(&cs35l41->fw_mutex);
if (cs35l41_smart_amp(cs35l41) < 0)
dev_warn(cs35l41->dev, "Cannot Run Firmware, reverting to dsp bypass...\n");
- mutex_unlock(&cs35l41->fw_mutex);
} else {
dev_dbg(cs35l41->dev, "Firmware Autostart is disabled.\n");
}
@@ -863,8 +967,8 @@ static int cs35l41_hda_bind(struct device *dev, struct device *master, void *mas
ret = cs35l41_create_controls(cs35l41);
comps->playback_hook = cs35l41_hda_playback_hook;
- comps->suspend_hook = cs35l41_hda_suspend_hook;
- comps->resume_hook = cs35l41_hda_resume_hook;
+
+ mutex_unlock(&cs35l41->fw_mutex);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -1433,6 +1537,7 @@ EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
const struct dev_pm_ops cs35l41_hda_pm_ops = {
RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cs35l41_system_suspend, cs35l41_system_resume)
};
EXPORT_SYMBOL_NS_GPL(cs35l41_hda_pm_ops, SND_HDA_SCODEC_CS35L41);
diff --git a/sound/pci/hda/hda_component.h b/sound/pci/hda/hda_component.h
index 1223621bd62c..534e845b9cd1 100644
--- a/sound/pci/hda/hda_component.h
+++ b/sound/pci/hda/hda_component.h
@@ -16,6 +16,4 @@ struct hda_component {
char name[HDA_MAX_NAME_SIZE];
struct hda_codec *codec;
void (*playback_hook)(struct device *dev, int action);
- int (*suspend_hook)(struct device *dev);
- int (*resume_hook)(struct device *dev);
};
diff --git a/sound/pci/hda/hda_cs_dsp_ctl.c b/sound/pci/hda/hda_cs_dsp_ctl.c
index 89ee549cb7d5..1622a22f96f6 100644
--- a/sound/pci/hda/hda_cs_dsp_ctl.c
+++ b/sound/pci/hda/hda_cs_dsp_ctl.c
@@ -97,7 +97,7 @@ static unsigned int wmfw_convert_flags(unsigned int in)
return out;
}
-static int hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char *name)
+static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char *name)
{
struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
struct snd_kcontrol_new kcontrol = {0};
@@ -107,7 +107,7 @@ static int hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
if (cs_ctl->len > ADSP_MAX_STD_CTRL_SIZE) {
dev_err(cs_ctl->dsp->dev, "KControl %s: length %zu exceeds maximum %d\n", name,
cs_ctl->len, ADSP_MAX_STD_CTRL_SIZE);
- return -EINVAL;
+ return;
}
kcontrol.name = name;
@@ -120,24 +120,21 @@ static int hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
/* Save ctl inside private_data, ctl is owned by cs_dsp,
* and will be freed when cs_dsp removes the control */
kctl = snd_ctl_new1(&kcontrol, (void *)ctl);
- if (!kctl) {
- ret = -ENOMEM;
- return ret;
- }
+ if (!kctl)
+ return;
ret = snd_ctl_add(ctl->card, kctl);
if (ret) {
dev_err(cs_ctl->dsp->dev, "Failed to add KControl %s = %d\n", kcontrol.name, ret);
- return ret;
+ return;
}
dev_dbg(cs_ctl->dsp->dev, "Added KControl: %s\n", kcontrol.name);
ctl->kctl = kctl;
-
- return 0;
}
-int hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl, struct hda_cs_dsp_ctl_info *info)
+static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
+ const struct hda_cs_dsp_ctl_info *info)
{
struct cs_dsp *cs_dsp = cs_ctl->dsp;
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
@@ -145,13 +142,10 @@ int hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl, struct hda_cs_dsp_ct
const char *region_name;
int ret;
- if (cs_ctl->flags & WMFW_CTL_FLAG_SYS)
- return 0;
-
region_name = cs_dsp_mem_region_name(cs_ctl->alg_region.type);
if (!region_name) {
- dev_err(cs_dsp->dev, "Unknown region type: %d\n", cs_ctl->alg_region.type);
- return -EINVAL;
+ dev_warn(cs_dsp->dev, "Unknown region type: %d\n", cs_ctl->alg_region.type);
+ return;
}
ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s %.12s %x", info->device_name,
@@ -171,22 +165,39 @@ int hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl, struct hda_cs_dsp_ct
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
- return -ENOMEM;
+ return;
ctl->cs_ctl = cs_ctl;
ctl->card = info->card;
cs_ctl->priv = ctl;
- ret = hda_cs_dsp_add_kcontrol(ctl, name);
- if (ret) {
- dev_err(cs_dsp->dev, "Error (%d) adding control %s\n", ret, name);
- kfree(ctl);
- return ret;
- }
+ hda_cs_dsp_add_kcontrol(ctl, name);
+}
- return 0;
+void hda_cs_dsp_add_controls(struct cs_dsp *dsp, const struct hda_cs_dsp_ctl_info *info)
+{
+ struct cs_dsp_coeff_ctl *cs_ctl;
+
+ /*
+	 * Taking pwr_lock here would cause mutex inversion with the ALSA
+	 * control lock relative to the get/put functions.
+ * It is safe to walk the list without holding a mutex because entries
+ * are persistent and only cs_dsp_power_up() or cs_dsp_remove() can
+ * change the list.
+ */
+ lockdep_assert_not_held(&dsp->pwr_lock);
+
+ list_for_each_entry(cs_ctl, &dsp->ctl_list, list) {
+ if (cs_ctl->flags & WMFW_CTL_FLAG_SYS)
+ continue;
+
+ if (cs_ctl->priv)
+ continue;
+
+ hda_cs_dsp_control_add(cs_ctl, info);
+ }
}
-EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_control_add, SND_HDA_CS_DSP_CONTROLS);
+EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_add_controls, SND_HDA_CS_DSP_CONTROLS);
void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
{
@@ -203,19 +214,18 @@ int hda_cs_dsp_write_ctl(struct cs_dsp *dsp, const char *name, int type,
struct hda_cs_dsp_coeff_ctl *ctl;
int ret;
+ mutex_lock(&dsp->pwr_lock);
cs_ctl = cs_dsp_get_ctl(dsp, name, type, alg);
- if (!cs_ctl)
- return -EINVAL;
-
- ctl = cs_ctl->priv;
-
ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
+ mutex_unlock(&dsp->pwr_lock);
if (ret)
return ret;
if (cs_ctl->flags & WMFW_CTL_FLAG_SYS)
return 0;
+ ctl = cs_ctl->priv;
+
snd_ctl_notify(ctl->card, SNDRV_CTL_EVENT_MASK_VALUE, &ctl->kctl->id);
return 0;
@@ -225,13 +235,14 @@ EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_write_ctl, SND_HDA_CS_DSP_CONTROLS);
int hda_cs_dsp_read_ctl(struct cs_dsp *dsp, const char *name, int type,
unsigned int alg, void *buf, size_t len)
{
- struct cs_dsp_coeff_ctl *cs_ctl;
+ int ret;
- cs_ctl = cs_dsp_get_ctl(dsp, name, type, alg);
- if (!cs_ctl)
- return -EINVAL;
+ mutex_lock(&dsp->pwr_lock);
+ ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(dsp, name, type, alg), 0, buf, len);
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
- return cs_dsp_coeff_read_ctrl(cs_ctl, 0, buf, len);
}
EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_read_ctl, SND_HDA_CS_DSP_CONTROLS);
diff --git a/sound/pci/hda/hda_cs_dsp_ctl.h b/sound/pci/hda/hda_cs_dsp_ctl.h
index 4babc69cf2f0..2cf93359c4f2 100644
--- a/sound/pci/hda/hda_cs_dsp_ctl.h
+++ b/sound/pci/hda/hda_cs_dsp_ctl.h
@@ -29,7 +29,7 @@ struct hda_cs_dsp_ctl_info {
extern const char * const hda_cs_dsp_fw_ids[HDA_CS_DSP_NUM_FW];
-int hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl, struct hda_cs_dsp_ctl_info *info);
+void hda_cs_dsp_add_controls(struct cs_dsp *dsp, const struct hda_cs_dsp_ctl_info *info);
void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl);
int hda_cs_dsp_write_ctl(struct cs_dsp *dsp, const char *name, int type,
unsigned int alg, const void *buf, size_t len);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bce82b834cec..e6c4bb5fa041 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4022,22 +4022,16 @@ static void alc5505_dsp_init(struct hda_codec *codec)
static int alc269_suspend(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- int i;
if (spec->has_alc5505_dsp)
alc5505_dsp_suspend(codec);
- for (i = 0; i < HDA_MAX_COMPONENTS; i++)
- if (spec->comps[i].suspend_hook)
- spec->comps[i].suspend_hook(spec->comps[i].dev);
-
return alc_suspend(codec);
}
static int alc269_resume(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- int i;
if (spec->codec_variant == ALC269_TYPE_ALC269VB)
alc269vb_toggle_power_output(codec, 0);
@@ -4068,10 +4062,6 @@ static int alc269_resume(struct hda_codec *codec)
if (spec->has_alc5505_dsp)
alc5505_dsp_resume(codec);
- for (i = 0; i < HDA_MAX_COMPONENTS; i++)
- if (spec->comps[i].resume_hook)
- spec->comps[i].resume_hook(spec->comps[i].dev);
-
return 0;
}
#endif /* CONFIG_PM */
@@ -6664,19 +6654,12 @@ static int comp_bind(struct device *dev)
{
struct hda_codec *cdc = dev_to_hda_codec(dev);
struct alc_spec *spec = cdc->spec;
- int ret, i;
+ int ret;
ret = component_bind_all(dev, spec->comps);
if (ret)
return ret;
- if (snd_hdac_is_power_on(&cdc->core)) {
- codec_dbg(cdc, "Resuming after bind.\n");
- for (i = 0; i < HDA_MAX_COMPONENTS; i++)
- if (spec->comps[i].resume_hook)
- spec->comps[i].resume_hook(spec->comps[i].dev);
- }
-
return 0;
}
@@ -8449,11 +8432,13 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC285_FIXUP_ASUS_G533Z_PINS] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
- { 0x14, 0x90170120 },
+ { 0x14, 0x90170152 }, /* Speaker Surround Playback Switch */
+ { 0x19, 0x03a19020 }, /* Mic Boost Volume */
+ { 0x1a, 0x03a11c30 }, /* Mic Boost Volume */
+ { 0x1e, 0x90170151 }, /* Rear jack, IN OUT EAPD Detect */
+ { 0x21, 0x03211420 },
{ }
},
- .chained = true,
- .chain_id = ALC294_FIXUP_ASUS_G513_PINS,
},
[ALC294_FIXUP_ASUS_COEF_1B] = {
.type = HDA_FIXUP_VERBS,
@@ -9198,7 +9183,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
- SND_PCI_QUIRK(0x1028, 0x087d, "Dell Precision 5530", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
@@ -9422,6 +9406,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -9443,6 +9428,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
diff --git a/sound/usb/card.h b/sound/usb/card.h
index ca75f2206170..40061550105a 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -129,7 +129,8 @@ struct snd_usb_endpoint {
in a stream */
bool implicit_fb_sync; /* syncs with implicit feedback */
bool lowlatency_playback; /* low-latency playback mode */
- bool need_setup; /* (re-)need for configure? */
+ bool need_setup; /* (re-)need for hw_params? */
+ bool need_prepare; /* (re-)need for prepare? */
/* for hw constraints */
const struct audioformat *cur_audiofmt;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 48a3843a08f1..d0b8d61d1d22 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -32,6 +32,7 @@ struct snd_usb_iface_ref {
unsigned char iface;
bool need_setup;
int opened;
+ int altset;
struct list_head list;
};
@@ -823,6 +824,7 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
ep->implicit_fb_sync = fp->implicit_fb;
ep->need_setup = true;
+ ep->need_prepare = true;
usb_audio_dbg(chip, " channels=%d, rate=%d, format=%s, period_bytes=%d, periods=%d, implicit_fb=%d\n",
ep->cur_channels, ep->cur_rate,
@@ -899,6 +901,9 @@ static int endpoint_set_interface(struct snd_usb_audio *chip,
int altset = set ? ep->altsetting : 0;
int err;
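+	/* Interface already at the requested altsetting; skip the USB request */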
+ if (ep->iface_ref->altset == altset)
+ return 0;
+
usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n",
ep->iface, altset, ep->ep_num);
err = usb_set_interface(chip->dev, ep->iface, altset);
@@ -910,6 +915,7 @@ static int endpoint_set_interface(struct snd_usb_audio *chip,
if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
msleep(50);
+ ep->iface_ref->altset = altset;
return 0;
}
@@ -947,7 +953,7 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
/* Prepare for suspending EP, called from the main suspend handler */
void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep)
{
- ep->need_setup = true;
+ ep->need_prepare = true;
if (ep->iface_ref)
ep->iface_ref->need_setup = true;
if (ep->clock_ref)
@@ -1330,12 +1336,16 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
struct snd_usb_endpoint *ep)
{
const struct audioformat *fmt = ep->cur_audiofmt;
- int err;
+ int err = 0;
+
+ mutex_lock(&chip->mutex);
+ if (!ep->need_setup)
+ goto unlock;
/* release old buffers, if any */
err = release_urbs(ep, false);
if (err < 0)
- return err;
+ goto unlock;
ep->datainterval = fmt->datainterval;
ep->maxpacksize = fmt->maxpacksize;
@@ -1373,13 +1383,21 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
usb_audio_dbg(chip, "Set up %d URBS, ret=%d\n", ep->nurbs, err);
if (err < 0)
- return err;
+ goto unlock;
/* some unit conversions in runtime */
ep->maxframesize = ep->maxpacksize / ep->cur_frame_bytes;
ep->curframesize = ep->curpacksize / ep->cur_frame_bytes;
- return update_clock_ref_rate(chip, ep);
+ err = update_clock_ref_rate(chip, ep);
+ if (err >= 0) {
+ ep->need_setup = false;
+ err = 0;
+ }
+
+ unlock:
+ mutex_unlock(&chip->mutex);
+ return err;
}
static int init_sample_rate(struct snd_usb_audio *chip,
@@ -1426,7 +1444,7 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
mutex_lock(&chip->mutex);
if (WARN_ON(!ep->iface_ref))
goto unlock;
- if (!ep->need_setup)
+ if (!ep->need_prepare)
goto unlock;
/* If the interface has already been set up, just set EP parameters */
@@ -1480,7 +1498,7 @@ int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
ep->iface_ref->need_setup = false;
done:
- ep->need_setup = false;
+ ep->need_prepare = false;
err = 1;
unlock:
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 6674bdb096f3..10ac52705892 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -155,6 +155,11 @@
* Return Stack Buffer Predictions.
*/
+#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
+ * IA32_XAPIC_DISABLE_STATUS MSR
+ * supported
+ */
+
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
* Writeback and invalidate the
@@ -585,6 +590,9 @@
#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+/* AMD Last Branch Record MSRs */
+#define MSR_AMD64_LBR_SELECT 0xc000010e
+
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
@@ -756,6 +764,8 @@
#define MSR_AMD_DBG_EXTN_CFG 0xc000010f
#define MSR_AMD_SAMP_BR_FROM 0xc0010300
+#define DBG_EXTN_CFG_LBRV2EN BIT_ULL(6)
+
#define MSR_IA32_MPERF 0x000000e7
#define MSR_IA32_APERF 0x000000e8
@@ -1054,4 +1064,12 @@
#define MSR_IA32_HW_FEEDBACK_PTR 0x17d0
#define MSR_IA32_HW_FEEDBACK_CONFIG 0x17d1
+/* x2APIC locked status */
+#define MSR_IA32_XAPIC_DISABLE_STATUS 0xBD
+#define LEGACY_XAPIC_DISABLED BIT(0) /*
+ * x2APIC mode is locked and
+ * disabling x2APIC will cause
+ * a #GP
+ */
+
#endif /* _ASM_X86_MSR_INDEX_H */
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index e282faf8fd75..ad47d7b31046 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bpf.h>
-#include <linux/compiler.h>
#include <sys/types.h> /* pid_t */
#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
@@ -207,7 +206,7 @@ struct perf_record_range_cpu_map {
__u16 end_cpu;
};
-struct __packed perf_record_cpu_map_data {
+struct perf_record_cpu_map_data {
__u16 type;
union {
/* Used when type == PERF_CPU_MAP__CPUS. */
@@ -219,7 +218,7 @@ struct __packed perf_record_cpu_map_data {
/* Used when type == PERF_CPU_MAP__RANGE_CPUS. */
struct perf_record_range_cpu_map range_cpu_data;
};
-};
+} __attribute__((packed));
#pragma GCC diagnostic pop
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index 5fc6a2a3dbc5..deeb163999ce 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -4,9 +4,11 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <dirent.h>
#include <stdbool.h>
#include <linux/coresight-pmu.h>
#include <linux/zalloc.h>
+#include <api/fs/fs.h>
#include "../../../util/auxtrace.h"
#include "../../../util/debug.h"
@@ -14,6 +16,7 @@
#include "../../../util/pmu.h"
#include "cs-etm.h"
#include "arm-spe.h"
+#include "hisi-ptt.h"
static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
{
@@ -50,42 +53,114 @@ static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
return arm_spe_pmus;
}
+static struct perf_pmu **find_all_hisi_ptt_pmus(int *nr_ptts, int *err)
+{
+ const char *sysfs = sysfs__mountpoint();
+ struct perf_pmu **hisi_ptt_pmus = NULL;
+ struct dirent *dent;
+ char path[PATH_MAX];
+ DIR *dir = NULL;
+ int idx = 0;
+
+ snprintf(path, PATH_MAX, "%s" EVENT_SOURCE_DEVICE_PATH, sysfs);
+ dir = opendir(path);
+ if (!dir) {
+ pr_err("can't read directory '%s'\n", EVENT_SOURCE_DEVICE_PATH);
+ *err = -EINVAL;
+ return NULL;
+ }
+
+ while ((dent = readdir(dir))) {
+ if (strstr(dent->d_name, HISI_PTT_PMU_NAME))
+ (*nr_ptts)++;
+ }
+
+ if (!(*nr_ptts))
+ goto out;
+
+ hisi_ptt_pmus = zalloc(sizeof(struct perf_pmu *) * (*nr_ptts));
+ if (!hisi_ptt_pmus) {
+ pr_err("hisi_ptt alloc failed\n");
+ *err = -ENOMEM;
+ goto out;
+ }
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (strstr(dent->d_name, HISI_PTT_PMU_NAME) && idx < *nr_ptts) {
+ hisi_ptt_pmus[idx] = perf_pmu__find(dent->d_name);
+ if (hisi_ptt_pmus[idx])
+ idx++;
+ }
+ }
+
+out:
+ closedir(dir);
+ return hisi_ptt_pmus;
+}
+
+static struct perf_pmu *find_pmu_for_event(struct perf_pmu **pmus,
+ int pmu_nr, struct evsel *evsel)
+{
+ int i;
+
+ if (!pmus)
+ return NULL;
+
+ for (i = 0; i < pmu_nr; i++) {
+ if (evsel->core.attr.type == pmus[i]->type)
+ return pmus[i];
+ }
+
+ return NULL;
+}
+
struct auxtrace_record
*auxtrace_record__init(struct evlist *evlist, int *err)
{
- struct perf_pmu *cs_etm_pmu;
+ struct perf_pmu *cs_etm_pmu = NULL;
+ struct perf_pmu **arm_spe_pmus = NULL;
+ struct perf_pmu **hisi_ptt_pmus = NULL;
struct evsel *evsel;
- bool found_etm = false;
+ struct perf_pmu *found_etm = NULL;
struct perf_pmu *found_spe = NULL;
- struct perf_pmu **arm_spe_pmus = NULL;
+ struct perf_pmu *found_ptt = NULL;
+ int auxtrace_event_cnt = 0;
int nr_spes = 0;
- int i = 0;
+ int nr_ptts = 0;
if (!evlist)
return NULL;
cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
+ hisi_ptt_pmus = find_all_hisi_ptt_pmus(&nr_ptts, err);
evlist__for_each_entry(evlist, evsel) {
- if (cs_etm_pmu &&
- evsel->core.attr.type == cs_etm_pmu->type)
- found_etm = true;
-
- if (!nr_spes || found_spe)
- continue;
-
- for (i = 0; i < nr_spes; i++) {
- if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
- found_spe = arm_spe_pmus[i];
- break;
- }
- }
+ if (cs_etm_pmu && !found_etm)
+ found_etm = find_pmu_for_event(&cs_etm_pmu, 1, evsel);
+
+ if (arm_spe_pmus && !found_spe)
+ found_spe = find_pmu_for_event(arm_spe_pmus, nr_spes, evsel);
+
+ if (hisi_ptt_pmus && !found_ptt)
+ found_ptt = find_pmu_for_event(hisi_ptt_pmus, nr_ptts, evsel);
}
+
free(arm_spe_pmus);
+ free(hisi_ptt_pmus);
+
+ if (found_etm)
+ auxtrace_event_cnt++;
- if (found_etm && found_spe) {
- pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
+ if (found_spe)
+ auxtrace_event_cnt++;
+
+ if (found_ptt)
+ auxtrace_event_cnt++;
+
+ if (auxtrace_event_cnt > 1) {
+ pr_err("Concurrent AUX trace operation not currently supported\n");
*err = -EOPNOTSUPP;
return NULL;
}
@@ -96,6 +171,9 @@ struct auxtrace_record
#if defined(__aarch64__)
if (found_spe)
return arm_spe_recording_init(err, found_spe);
+
+ if (found_ptt)
+ return hisi_ptt_recording_init(err, found_ptt);
#endif
/*
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index b8b23b9dc598..887c8addc491 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -10,6 +10,7 @@
#include <linux/string.h>
#include "arm-spe.h"
+#include "hisi-ptt.h"
#include "../../../util/pmu.h"
struct perf_event_attr
@@ -22,6 +23,8 @@ struct perf_event_attr
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
return arm_spe_pmu_default_config(pmu);
+ } else if (strstarts(pmu->name, HISI_PTT_PMU_NAME)) {
+ pmu->selectable = true;
#endif
}
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 037e292ecd8e..4af0c3a0f86e 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -102,7 +102,7 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
if (err)
goto out_free_arm;
/* b, b.cond, br, cbz/cbnz, tbz/tbnz */
- err = regcomp(&arm->jump_insn, "^[ct]?br?\\.?(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl)?n?z?$",
+ err = regcomp(&arm->jump_insn, "^[ct]?br?\\.?(cc|cs|eq|ge|gt|hi|hs|le|lo|ls|lt|mi|ne|pl|vc|vs)?n?z?$",
REG_EXTENDED);
if (err)
goto out_free_call;
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 9fcb4e68add9..337aa9bdf905 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -11,4 +11,4 @@ perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o \
- arm-spe.o mem-events.o
+ arm-spe.o mem-events.o hisi-ptt.o
diff --git a/tools/perf/arch/arm64/util/hisi-ptt.c b/tools/perf/arch/arm64/util/hisi-ptt.c
new file mode 100644
index 000000000000..ba97c8a562a0
--- /dev/null
+++ b/tools/perf/arch/arm64/util/hisi-ptt.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HiSilicon PCIe Trace and Tuning (PTT) support
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/zalloc.h>
+#include <time.h>
+
+#include <internal/lib.h> // page_size
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/hisi-ptt.h"
+#include "../../../util/pmu.h"
+#include "../../../util/record.h"
+#include "../../../util/session.h"
+#include "../../../util/tsc.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+
+struct hisi_ptt_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *hisi_ptt_pmu;
+ struct evlist *evlist;
+};
+
+static size_t
+hisi_ptt_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct evlist *evlist __maybe_unused)
+{
+ return HISI_PTT_AUXTRACE_PRIV_SIZE;
+}
+
+static int hisi_ptt_info_fill(struct auxtrace_record *itr,
+ struct perf_session *session,
+ struct perf_record_auxtrace_info *auxtrace_info,
+ size_t priv_size)
+{
+ struct hisi_ptt_recording *pttr =
+ container_of(itr, struct hisi_ptt_recording, itr);
+ struct perf_pmu *hisi_ptt_pmu = pttr->hisi_ptt_pmu;
+
+ if (priv_size != HISI_PTT_AUXTRACE_PRIV_SIZE)
+ return -EINVAL;
+
+ if (!session->evlist->core.nr_mmaps)
+ return -EINVAL;
+
+ auxtrace_info->type = PERF_AUXTRACE_HISI_PTT;
+ auxtrace_info->priv[0] = hisi_ptt_pmu->type;
+
+ return 0;
+}
+
+static int hisi_ptt_set_auxtrace_mmap_page(struct record_opts *opts)
+{
+ bool privileged = perf_event_paranoid_check(-1);
+
+ if (!opts->full_auxtrace)
+ return 0;
+
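+	/* Default AUX buffer sizes, apparently following other auxtrace
+	 * drivers: 16MiB when privileged, 128KiB (plus a 256KiB data mmap)
+	 * otherwise.
+	 */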
+ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(16) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ }
+
+ /* Validate auxtrace_mmap_pages */
+ if (opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+ size_t min_sz = KiB(8);
+
+ if (sz < min_sz || !is_power_of_2(sz)) {
+ pr_err("Invalid mmap size for HISI PTT: must be at least %zuKiB and a power of 2\n",
+ min_sz / 1024);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int hisi_ptt_recording_options(struct auxtrace_record *itr,
+ struct evlist *evlist,
+ struct record_opts *opts)
+{
+ struct hisi_ptt_recording *pttr =
+ container_of(itr, struct hisi_ptt_recording, itr);
+ struct perf_pmu *hisi_ptt_pmu = pttr->hisi_ptt_pmu;
+ struct evsel *evsel, *hisi_ptt_evsel = NULL;
+ struct evsel *tracking_evsel;
+ int err;
+
+ pttr->evlist = evlist;
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.type == hisi_ptt_pmu->type) {
+ if (hisi_ptt_evsel) {
+ pr_err("There may be only one " HISI_PTT_PMU_NAME "x event\n");
+ return -EINVAL;
+ }
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = 1;
+ evsel->needs_auxtrace_mmap = true;
+ hisi_ptt_evsel = evsel;
+ opts->full_auxtrace = true;
+ }
+ }
+
+ err = hisi_ptt_set_auxtrace_mmap_page(opts);
+ if (err)
+ return err;
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ evlist__to_front(evlist, hisi_ptt_evsel);
+ evsel__set_sample_bit(hisi_ptt_evsel, TIME);
+
+ /* Add dummy event to keep tracking */
+ err = parse_event(evlist, "dummy:u");
+ if (err)
+ return err;
+
+ tracking_evsel = evlist__last(evlist);
+ evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->core.attr.freq = 0;
+ tracking_evsel->core.attr.sample_period = 1;
+ evsel__set_sample_bit(tracking_evsel, TIME);
+
+ return 0;
+}
+
+static u64 hisi_ptt_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ return rdtsc();
+}
+
+static void hisi_ptt_recording_free(struct auxtrace_record *itr)
+{
+ struct hisi_ptt_recording *pttr =
+ container_of(itr, struct hisi_ptt_recording, itr);
+
+ free(pttr);
+}
+
+struct auxtrace_record *hisi_ptt_recording_init(int *err,
+ struct perf_pmu *hisi_ptt_pmu)
+{
+ struct hisi_ptt_recording *pttr;
+
+ if (!hisi_ptt_pmu) {
+ *err = -ENODEV;
+ return NULL;
+ }
+
+ pttr = zalloc(sizeof(*pttr));
+ if (!pttr) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ pttr->hisi_ptt_pmu = hisi_ptt_pmu;
+ pttr->itr.pmu = hisi_ptt_pmu;
+ pttr->itr.recording_options = hisi_ptt_recording_options;
+ pttr->itr.info_priv_size = hisi_ptt_info_priv_size;
+ pttr->itr.info_fill = hisi_ptt_info_fill;
+ pttr->itr.free = hisi_ptt_recording_free;
+ pttr->itr.reference = hisi_ptt_reference;
+ pttr->itr.read_finish = auxtrace_record__read_finish;
+ pttr->itr.alignment = 0;
+
+ *err = 0;
+ return &pttr->itr;
+}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 793b35f2221a..af102f471e9f 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -866,7 +866,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* User space tasks can migrate between CPUs, so when tracing
* selected CPUs, sideband for all CPUs is still needed.
*/
- need_system_wide_tracking = evlist->core.has_user_cpus &&
+ need_system_wide_tracking = opts->target.cpu_list &&
!intel_pt_evsel->core.attr.exclude_user;
tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 744dd3520584..58e1ec1654ef 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -60,7 +60,7 @@ int cmd_list(int argc, const char **argv)
setup_pager();
if (!raw_dump && pager_in_use())
- printf("\nList of pre-defined events (to be used in -e):\n\n");
+ printf("\nList of pre-defined events (to be used in -e or -M):\n\n");
if (hybrid_type) {
pmu_name = perf_pmu__hybrid_type_to_pmu(hybrid_type);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index f7dd8216de72..923fb8316fda 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -97,6 +97,9 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
else
rec_argc = argc + 9 * perf_pmu__hybrid_pmu_num();
+ if (mem->cpu_list)
+ rec_argc += 2;
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
@@ -159,6 +162,11 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (all_kernel)
rec_argv[i++] = "--all-kernel";
+ if (mem->cpu_list) {
+ rec_argv[i++] = "-C";
+ rec_argv[i++] = mem->cpu_list;
+ }
+
for (j = 0; j < argc; j++, i++)
rec_argv[i] = argv[j];
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 8c10955eff93..3ef07a12aa14 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -9,7 +9,7 @@ size=128
config=0
sample_period=*
sample_type=263
-read_format=0|4
+read_format=0|4|20
disabled=1
inherit=1
pinned=0
diff --git a/tools/perf/tests/attr/system-wide-dummy b/tools/perf/tests/attr/system-wide-dummy
index 86a15dd359d9..8fec06eda5f9 100644
--- a/tools/perf/tests/attr/system-wide-dummy
+++ b/tools/perf/tests/attr/system-wide-dummy
@@ -11,7 +11,7 @@ size=128
config=9
sample_period=4000
sample_type=455
-read_format=4
+read_format=4|20
# Event will be enabled right away.
disabled=0
inherit=1
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index 14ee60fd3f41..6c1cff8aae8b 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -7,14 +7,14 @@ ret = 1
fd=1
group_fd=-1
sample_type=327
-read_format=4
+read_format=4|20
[event-2:base-record]
fd=2
group_fd=1
config=1
sample_type=327
-read_format=4
+read_format=4|20
mmap=0
comm=0
task=0
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index 300b9f7e6d69..97e7e64a38f0 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -7,7 +7,7 @@ ret = 1
fd=1
group_fd=-1
sample_type=343
-read_format=12
+read_format=12|28
inherit=0
[event-2:base-record]
@@ -21,8 +21,8 @@ config=3
# default | PERF_SAMPLE_READ
sample_type=343
-# PERF_FORMAT_ID | PERF_FORMAT_GROUP
-read_format=12
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP | PERF_FORMAT_LOST
+read_format=12|28
task=0
mmap=0
comm=0
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 3ffe246e0228..eeb1db392bc9 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -7,7 +7,7 @@ ret = 1
fd=1
group_fd=-1
sample_type=327
-read_format=4
+read_format=4|20
[event-2:base-record]
fd=2
@@ -15,7 +15,7 @@ group_fd=1
type=0
config=1
sample_type=327
-read_format=4
+read_format=4|20
mmap=0
comm=0
task=0
diff --git a/tools/perf/tests/attr/test-record-group2 b/tools/perf/tests/attr/test-record-group2
index 6b9f8d182ce1..cebdaa8e64e4 100644
--- a/tools/perf/tests/attr/test-record-group2
+++ b/tools/perf/tests/attr/test-record-group2
@@ -9,7 +9,7 @@ group_fd=-1
config=0|1
sample_period=1234000
sample_type=87
-read_format=12
+read_format=12|28
inherit=0
freq=0
@@ -19,7 +19,7 @@ group_fd=1
config=0|1
sample_period=6789000
sample_type=87
-read_format=12
+read_format=12|28
disabled=0
inherit=0
mmap=0
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index eb5196f58190..b7f050aa6210 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -6,6 +6,8 @@
set -e
+skip_test=0
+
function commachecker()
{
local -i cnt=0
@@ -156,14 +158,47 @@ check_per_socket()
echo "[Success]"
}
+# The perf stat options for per-socket, per-core, per-die
+# and -A (no_aggr mode) use the information fetched from the
+# "/sys/devices/system/cpu/cpu*/topology" directory. For
+# example, the socket value is fetched from the
+# "physical_package_id" file in the topology directory.
+# Reference: cpu__get_topology_int in util/cpumap.c
+# If the platform doesn't expose topology information, values
+# will be set to -1. For example, in case of the pSeries
+# platform of powerpc, the value for "physical_package_id" is
+# restricted and set to -1. The check here validates the
+# socket-id read from the topology file before proceeding
+# further.
+
+FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
+FILE_NAME="physical_package_id"
+
+check_for_topology()
+{
+ if ! ParanoidAndNotRoot 0
+ then
+ socket_file=$(ls $FILE_LOC/$FILE_NAME | head -n 1)
+ [ -z "$socket_file" ] && return 0
+ socket_id=$(cat "$socket_file")
+ [ "$socket_id" = -1 ] && skip_test=1
+ return 0
+ fi
+}
+
+check_for_topology
check_no_args
check_system_wide
-check_system_wide_no_aggr
check_interval
check_event
-check_per_core
check_per_thread
-check_per_die
check_per_node
-check_per_socket
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr
+ check_per_core
+ check_per_die
+ check_per_socket
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
exit 0
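
The skip logic above mirrors what cpu__get_topology_int() in tools/perf/util/cpumap.c does: read a single integer from a sysfs topology file and treat -1 as "not exposed". A minimal C sketch of the same idea (read_topology_int() is a hypothetical helper, not the perf function):

	#include <stdio.h>

	/* Hypothetical helper: returns 0 and fills *value on success, -1 on error. */
	static int read_topology_int(int cpu, const char *name, int *value)
	{
		char path[256];
		FILE *f;
		int ok;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, name);
		f = fopen(path, "r");
		if (!f)
			return -1;
		ok = fscanf(f, "%d", value) == 1;
		fclose(f);
		return ok ? 0 : -1;
	}

	int main(void)
	{
		int socket_id;

		/* Restricted platforms (e.g. powerpc pSeries) report -1 here. */
		if (!read_topology_int(0, "physical_package_id", &socket_id))
			printf("socket id: %d\n", socket_id);
		return 0;
	}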
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index ea8714a36051..2c4212c641ed 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -6,6 +6,8 @@
set -e
+skip_test=0
+
pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
if [ "x$PYTHON" == "x" ]
then
@@ -134,14 +136,47 @@ check_per_socket()
echo "[Success]"
}
+# The perf stat options for per-socket, per-core, per-die
+# and -A (no_aggr mode) use the information fetched from the
+# "/sys/devices/system/cpu/cpu*/topology" directory. For
+# example, the socket value is fetched from the
+# "physical_package_id" file in the topology directory.
+# Reference: cpu__get_topology_int in util/cpumap.c
+# If the platform doesn't expose topology information, values
+# will be set to -1. For example, in case of the pSeries
+# platform of powerpc, the value for "physical_package_id" is
+# restricted and set to -1. The check here validates the
+# socket-id read from the topology file before proceeding
+# further.
+
+FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
+FILE_NAME="physical_package_id"
+
+check_for_topology()
+{
+ if ! ParanoidAndNotRoot 0
+ then
+ socket_file=$(ls $FILE_LOC/$FILE_NAME | head -n 1)
+ [ -z "$socket_file" ] && return 0
+ socket_id=$(cat "$socket_file")
+ [ "$socket_id" = -1 ] && skip_test=1
+ return 0
+ fi
+}
+
+check_for_topology
check_no_args
check_system_wide
-check_system_wide_no_aggr
check_interval
check_event
-check_per_core
check_per_thread
-check_per_die
check_per_node
-check_per_socket
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr
+ check_per_core
+ check_per_die
+ check_per_socket
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
exit 0
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index e4cb4f1806ff..daad786cf48d 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -70,7 +70,7 @@ perf_report_instruction_samples() {
# 68.12% touch libc-2.27.so [.] _dl_addr
# 5.80% touch libc-2.27.so [.] getenv
# 4.35% touch ld-2.27.so [.] _dl_fixup
- perf report --itrace=i1000i --stdio -i ${perfdata} 2>&1 | \
+ perf report --itrace=i20i --stdio -i ${perfdata} 2>&1 | \
egrep " +[0-9]+\.[0-9]+% +$1" > /dev/null 2>&1
}
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index efaad9566c34..4c0aabbe33bd 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -22,6 +22,8 @@ outfile="${temp_dir}/test-out.txt"
errfile="${temp_dir}/test-err.txt"
workload="${temp_dir}/workload"
awkscript="${temp_dir}/awkscript"
+jitdump_workload="${temp_dir}/jitdump_workload"
+maxbrstack="${temp_dir}/maxbrstack.py"
cleanup()
{
@@ -42,6 +44,21 @@ trap_cleanup()
trap trap_cleanup EXIT TERM INT
+# perf record for testing without decoding
+perf_record_no_decode()
+{
+ # Options to speed up recording: no build-id collection (-B), no
+ # build-id cache update (-N) and no BPF events (--no-bpf-event).
+ perf record -B -N --no-bpf-event "$@"
+}
+
+# perf record for tests that should not need BPF events
+perf_record_no_bpf()
+{
+ # Options for no BPF events
+ perf record --no-bpf-event "$@"
+}
+
have_workload=false
cat << _end_of_file_ | /usr/bin/cc -o "${workload}" -xc - -pthread && have_workload=true
#include <time.h>
@@ -76,7 +93,7 @@ _end_of_file_
can_cpu_wide()
{
echo "Checking for CPU-wide recording on CPU $1"
- if ! perf record -o "${tmpfile}" -B -N --no-bpf-event -e dummy:u -C "$1" true >/dev/null 2>&1 ; then
+ if ! perf_record_no_decode -o "${tmpfile}" -e dummy:u -C "$1" true >/dev/null 2>&1 ; then
echo "No so skipping"
return 2
fi
@@ -93,7 +110,7 @@ test_system_wide_side_band()
can_cpu_wide 1 || return $?
# Record on CPU 0 a task running on CPU 1
- perf record -B -N --no-bpf-event -o "${perfdatafile}" -e intel_pt//u -C 0 -- taskset --cpu-list 1 uname
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u -C 0 -- taskset --cpu-list 1 uname
# Should get MMAP events from CPU 1 because they can be needed to decode
mmap_cnt=$(perf script -i "${perfdatafile}" --no-itrace --show-mmap-events -C 1 2>/dev/null | grep -c MMAP)
@@ -109,7 +126,14 @@ test_system_wide_side_band()
can_kernel()
{
- perf record -o "${tmpfile}" -B -N --no-bpf-event -e dummy:k true >/dev/null 2>&1 || return 2
+ if [ -z "${can_kernel_trace}" ] ; then
+ can_kernel_trace=0
+ perf_record_no_decode -o "${tmpfile}" -e dummy:k true >/dev/null 2>&1 && can_kernel_trace=1
+ fi
+ if [ ${can_kernel_trace} -eq 0 ] ; then
+ echo "SKIP: no kernel tracing"
+ return 2
+ fi
return 0
}
@@ -235,7 +259,7 @@ test_per_thread()
wait_for_threads ${w1} 2
wait_for_threads ${w2} 2
- perf record -B -N --no-bpf-event -o "${perfdatafile}" -e intel_pt//u"${k}" -vvv --per-thread -p "${w1},${w2}" 2>"${errfile}" >"${outfile}" &
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u"${k}" -vvv --per-thread -p "${w1},${w2}" 2>"${errfile}" >"${outfile}" &
ppid=$!
echo "perf PID is $ppid"
wait_for_perf_to_start ${ppid} "${errfile}" || return 1
@@ -254,6 +278,342 @@ test_per_thread()
return 0
}
+test_jitdump()
+{
+ echo "--- Test tracing self-modifying code that uses jitdump ---"
+
+ script_path=$(realpath "$0")
+ script_dir=$(dirname "$script_path")
+ jitdump_incl_dir="${script_dir}/../../util"
+ jitdump_h="${jitdump_incl_dir}/jitdump.h"
+
+ if [ ! -e "${jitdump_h}" ] ; then
+ echo "SKIP: Include file jitdump.h not found"
+ return 2
+ fi
+
+ if [ -z "${have_jitdump_workload}" ] ; then
+ have_jitdump_workload=false
+ # Create a workload that uses self-modifying code and generates its own jitdump file
+ cat <<- "_end_of_file_" | /usr/bin/cc -o "${jitdump_workload}" -I "${jitdump_incl_dir}" -xc - -pthread && have_jitdump_workload=true
+ #define _GNU_SOURCE
+ #include <sys/mman.h>
+ #include <sys/types.h>
+ #include <stddef.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <unistd.h>
+ #include <string.h>
+
+ #include "jitdump.h"
+
+ #define CHK_BYTE 0x5a
+
+ static inline uint64_t rdtsc(void)
+ {
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((uint64_t)high) << 32;
+ }
+
+ static FILE *open_jitdump(void)
+ {
+ struct jitheader header = {
+ .magic = JITHEADER_MAGIC,
+ .version = JITHEADER_VERSION,
+ .total_size = sizeof(header),
+ .pid = getpid(),
+ .timestamp = rdtsc(),
+ .flags = JITDUMP_FLAGS_ARCH_TIMESTAMP,
+ };
+ char filename[256];
+ FILE *f;
+ void *m;
+
+ snprintf(filename, sizeof(filename), "jit-%d.dump", getpid());
+ f = fopen(filename, "w+");
+ if (!f)
+ goto err;
+ /* Create an MMAP event for the jitdump file. That is how perf tool finds it. */
+ m = mmap(0, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ if (m == MAP_FAILED)
+ goto err_close;
+ munmap(m, 4096);
+ if (fwrite(&header,sizeof(header),1,f) != 1)
+ goto err_close;
+ return f;
+
+ err_close:
+ fclose(f);
+ err:
+ return NULL;
+ }
+
+ static int write_jitdump(FILE *f, void *addr, const uint8_t *dat, size_t sz, uint64_t *idx)
+ {
+ struct jr_code_load rec = {
+ .p.id = JIT_CODE_LOAD,
+ .p.total_size = sizeof(rec) + sz,
+ .p.timestamp = rdtsc(),
+ .pid = getpid(),
+ .tid = gettid(),
+ .vma = (unsigned long)addr,
+ .code_addr = (unsigned long)addr,
+ .code_size = sz,
+ .code_index = ++*idx,
+ };
+
+ if (fwrite(&rec,sizeof(rec),1,f) != 1 ||
+ fwrite(dat, sz, 1, f) != 1)
+ return -1;
+ return 0;
+ }
+
+ static void close_jitdump(FILE *f)
+ {
+ fclose(f);
+ }
+
+ int main()
+ {
+ /* Get a memory page to store executable code */
+ void *addr = mmap(0, 4096, PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ /* Code to execute: mov CHK_BYTE, %eax ; ret */
+ uint8_t dat[] = {0xb8, CHK_BYTE, 0x00, 0x00, 0x00, 0xc3};
+ FILE *f = open_jitdump();
+ uint64_t idx = 0;
+ int ret = 1;
+
+ if (!f)
+ return 1;
+ /* Copy executable code to executable memory page */
+ memcpy(addr, dat, sizeof(dat));
+ /* Record it in the jitdump file */
+ if (write_jitdump(f, addr, dat, sizeof(dat), &idx))
+ goto out_close;
+ /* Call it */
+ ret = ((int (*)(void))addr)() - CHK_BYTE;
+ out_close:
+ close_jitdump(f);
+ return ret;
+ }
+ _end_of_file_
+ fi
+
+ if ! $have_jitdump_workload ; then
+ echo "SKIP: No jitdump workload"
+ return 2
+ fi
+
+ # Change to temp_dir so jitdump collateral files go there
+ cd "${temp_dir}"
+ perf_record_no_bpf -o "${tmpfile}" -e intel_pt//u "${jitdump_workload}"
+ perf inject -i "${tmpfile}" -o "${perfdatafile}" --jit
+ decode_br_cnt=$(perf script -i "${perfdatafile}" --itrace=b | wc -l)
+ # Note that overflow and lost errors are suppressed for the error count
+ decode_err_cnt=$(perf script -i "${perfdatafile}" --itrace=e-o-l | grep -ci error)
+ cd -
+ # Should be thousands of branches
+ if [ "${decode_br_cnt}" -lt 1000 ] ; then
+ echo "Decode failed, only ${decode_br_cnt} branches"
+ return 1
+ fi
+ # Should be no errors
+ if [ "${decode_err_cnt}" -ne 0 ] ; then
+ echo "Decode failed, ${decode_err_cnt} errors"
+ perf script -i "${perfdatafile}" --itrace=e-o-l --show-mmap-events | cat
+ return 1
+ fi
+
+ echo OK
+ return 0
+}
+
+test_packet_filter()
+{
+ echo "--- Test with MTC and TSC disabled ---"
+ # Disable MTC and TSC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/mtc=0,tsc=0/u uname
+ # Should not get MTC packet
+ mtc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "MTC 0x")
+ if [ "${mtc_cnt}" -ne 0 ] ; then
+ echo "Failed to filter with mtc=0"
+ return 1
+ fi
+ # Should not get TSC packets
+ tsc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TSC 0x")
+ if [ "${tsc_cnt}" -ne 0 ] ; then
+ echo "Failed to filter with tsc=0"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_disable_branch()
+{
+ echo "--- Test with branches disabled ---"
+ # Disable branch
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/branch=0/u uname
+ # Should not get branch related packets
+ tnt_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TNT 0x")
+ tip_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TIP 0x")
+ fup_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "FUP 0x")
+ if [ "${tnt_cnt}" -ne 0 ] || [ "${tip_cnt}" -ne 0 ] || [ "${fup_cnt}" -ne 0 ] ; then
+ echo "Failed to disable branches"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_time_cyc()
+{
+ echo "--- Test with/without CYC ---"
+ # Check if CYC is supported
+ cyc=$(cat /sys/bus/event_source/devices/intel_pt/caps/psb_cyc)
+ if [ "${cyc}" != "1" ] ; then
+ echo "SKIP: CYC is not supported"
+ return 2
+ fi
+ # Enable CYC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/cyc/u uname
+ # should get CYC packets
+ cyc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "CYC 0x")
+ if [ "${cyc_cnt}" = "0" ] ; then
+ echo "Failed to get CYC packet"
+ return 1
+ fi
+ # Without CYC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u uname
+ # Should not get CYC packets
+ cyc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "CYC 0x")
+ if [ "${cyc_cnt}" -gt 0 ] ; then
+ echo "Still get CYC packet without cyc"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_sample()
+{
+ echo "--- Test recording with sample mode ---"
+ # Check if recording with sample mode is working
+ if ! perf_record_no_decode -o "${perfdatafile}" --aux-sample=8192 -e '{intel_pt//u,branch-misses:u}' uname ; then
+ echo "perf record failed with --aux-sample"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_kernel_trace()
+{
+ echo "--- Test with kernel trace ---"
+ # Check if recording with kernel trace is working
+ can_kernel || return 2
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt//k -m1,128 uname ; then
+ echo "perf record failed with intel_pt//k"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_virtual_lbr()
+{
+ echo "--- Test virtual LBR ---"
+
+ # Python script to determine the maximum size of branch stacks
+ cat << "_end_of_file_" > "${maxbrstack}"
+from __future__ import print_function
+
+bmax = 0
+
+def process_event(param_dict):
+ if "brstack" in param_dict:
+ brstack = param_dict["brstack"]
+ n = len(brstack)
+ global bmax
+ if n > bmax:
+ bmax = n
+
+def trace_end():
+ print("max brstack", bmax)
+_end_of_file_
+
+ # Check if virtual lbr is working
+ perf_record_no_bpf -o "${perfdatafile}" --aux-sample -e '{intel_pt//,cycles}:u' uname
+ times_val=$(perf script -i "${perfdatafile}" --itrace=L -s "${maxbrstack}" 2>/dev/null | grep "max brstack " | cut -d " " -f 3)
+ case "${times_val}" in
+ [0-9]*) ;;
+ *) times_val=0;;
+ esac
+ if [ "${times_val}" -lt 2 ] ; then
+ echo "Failed with virtual lbr"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_power_event()
+{
+ echo "--- Test power events ---"
+ # Check if power events are supported
+ power_event=$(cat /sys/bus/event_source/devices/intel_pt/caps/power_event_trace)
+ if [ "${power_event}" != "1" ] ; then
+ echo "SKIP: power_event_trace is not supported"
+ return 2
+ fi
+ if ! perf_record_no_decode -o "${perfdatafile}" -a -e intel_pt/pwr_evt/u uname ; then
+ echo "perf record failed with pwr_evt"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_no_tnt()
+{
+ echo "--- Test with TNT packets disabled ---"
+ # Check if TNT disable is supported
+ notnt=$(cat /sys/bus/event_source/devices/intel_pt/caps/tnt_disable)
+ if [ "${notnt}" != "1" ] ; then
+ echo "SKIP: tnt_disable is not supported"
+ return 2
+ fi
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/notnt/u uname
+ # Should be no TNT packets
+ tnt_cnt=$(perf script -i "${perfdatafile}" -D | grep -c TNT)
+ if [ "${tnt_cnt}" -ne 0 ] ; then
+ echo "TNT packets still there after notnt"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_event_trace()
+{
+ echo "--- Test with event_trace ---"
+ # Check if event_trace is supported
+ event_trace=$(cat /sys/bus/event_source/devices/intel_pt/caps/event_trace)
+ if [ "${event_trace}" != 1 ] ; then
+ echo "SKIP: event_trace is not supported"
+ return 2
+ fi
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt/event/u uname ; then
+ echo "perf record failed with event trace"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
count_result()
{
if [ "$1" -eq 2 ] ; then
@@ -265,13 +625,22 @@ count_result()
return
fi
err_cnt=$((err_cnt + 1))
- ret=0
}
ret=0
-test_system_wide_side_band || ret=$? ; count_result $ret
-test_per_thread "" "" || ret=$? ; count_result $ret
-test_per_thread "k" "(incl. kernel) " || ret=$? ; count_result $ret
+test_system_wide_side_band || ret=$? ; count_result $ret ; ret=0
+test_per_thread "" "" || ret=$? ; count_result $ret ; ret=0
+test_per_thread "k" "(incl. kernel) " || ret=$? ; count_result $ret ; ret=0
+test_jitdump || ret=$? ; count_result $ret ; ret=0
+test_packet_filter || ret=$? ; count_result $ret ; ret=0
+test_disable_branch || ret=$? ; count_result $ret ; ret=0
+test_time_cyc || ret=$? ; count_result $ret ; ret=0
+test_sample || ret=$? ; count_result $ret ; ret=0
+test_kernel_trace || ret=$? ; count_result $ret ; ret=0
+test_virtual_lbr || ret=$? ; count_result $ret ; ret=0
+test_power_event || ret=$? ; count_result $ret ; ret=0
+test_no_tnt || ret=$? ; count_result $ret ; ret=0
+test_event_trace || ret=$? ; count_result $ret ; ret=0
cleanup
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 815d235466d0..e315ecaec323 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -118,6 +118,8 @@ perf-$(CONFIG_AUXTRACE) += intel-pt.o
perf-$(CONFIG_AUXTRACE) += intel-bts.o
perf-$(CONFIG_AUXTRACE) += arm-spe.o
perf-$(CONFIG_AUXTRACE) += arm-spe-decoder/
+perf-$(CONFIG_AUXTRACE) += hisi-ptt.o
+perf-$(CONFIG_AUXTRACE) += hisi-ptt-decoder/
perf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
ifdef CONFIG_LIBOPENCSD
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index b59c278fe9ed..60d8beb662aa 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -52,6 +52,7 @@
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
+#include "hisi-ptt.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"
@@ -1320,6 +1321,9 @@ int perf_event__process_auxtrace_info(struct perf_session *session,
case PERF_AUXTRACE_S390_CPUMSF:
err = s390_cpumsf_process_auxtrace_info(event, session);
break;
+ case PERF_AUXTRACE_HISI_PTT:
+ err = hisi_ptt_process_auxtrace_info(event, session);
+ break;
case PERF_AUXTRACE_UNKNOWN:
default:
return -EINVAL;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index cb8e0a01abb6..6a0f9b98f059 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -48,6 +48,7 @@ enum auxtrace_type {
PERF_AUXTRACE_CS_ETM,
PERF_AUXTRACE_ARM_SPE,
PERF_AUXTRACE_S390_CPUMSF,
+ PERF_AUXTRACE_HISI_PTT,
};
enum itrace_period_type {
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 435a87556688..6a438e0102c5 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -43,6 +43,18 @@ struct {
__uint(value_size, sizeof(struct bpf_perf_event_value));
} cgrp_readings SEC(".maps");
+/* new kernel cgroup definition */
+struct cgroup___new {
+ int level;
+ struct cgroup *ancestors[];
+} __attribute__((preserve_access_index));
+
+/* old kernel cgroup definition */
+struct cgroup___old {
+ int level;
+ u64 ancestor_ids[];
+} __attribute__((preserve_access_index));
+
const volatile __u32 num_events = 1;
const volatile __u32 num_cpus = 1;
@@ -50,6 +62,21 @@ int enabled = 0;
int use_cgroup_v2 = 0;
int perf_subsys_id = -1;
+static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
+{
+ /* recast pointer to capture new type for compiler */
+ struct cgroup___new *cgrp_new = (void *)cgrp;
+
+ if (bpf_core_field_exists(cgrp_new->ancestors)) {
+ return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id);
+ } else {
+ /* recast pointer to capture old type for compiler */
+ struct cgroup___old *cgrp_old = (void *)cgrp;
+
+ return BPF_CORE_READ(cgrp_old, ancestor_ids[level]);
+ }
+}
+
static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
{
struct task_struct *p = (void *)bpf_get_current_task();
@@ -77,7 +104,7 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
break;
// convert cgroup-id to a map index
- cgrp_id = BPF_CORE_READ(cgrp, ancestors[i], kn, id);
+ cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);
elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
if (!elem)
continue;
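
The cgroup___new/cgroup___old pair above is the usual libbpf CO-RE pattern for a field that was renamed between kernel versions: declare both layouts with preserve_access_index and probe at load time with bpf_core_field_exists(). A generic sketch of the same pattern, with illustrative names (task_struct___a/___b and old_field/new_field are not real kernel fields):

	#include "vmlinux.h"          /* assumed bpftool-generated kernel types */
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_core_read.h>

	/* Hypothetical old and new layouts of the same kernel struct. */
	struct task_struct___a {
		int old_field;
	} __attribute__((preserve_access_index));

	struct task_struct___b {
		int new_field;
	} __attribute__((preserve_access_index));

	static __always_inline int read_field(struct task_struct *p)
	{
		/* recast pointers so the compiler records both candidate types */
		struct task_struct___b *p_new = (void *)p;
		struct task_struct___a *p_old = (void *)p;

		/* resolved once at load time against the running kernel's BTF */
		if (bpf_core_field_exists(p_new->new_field))
			return BPF_CORE_READ(p_new, new_field);

		return BPF_CORE_READ(p_old, old_field);
	}

The "___suffix" naming matters: libbpf strips everything from the triple underscore onward when matching against kernel BTF, so both flavors relocate against the real struct.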
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index b5c909546e3f..6af062d1c452 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -2,6 +2,8 @@
#ifndef __GENELF_H__
#define __GENELF_H__
+#include <linux/math.h>
+
/* genelf.c */
int jit_write_elf(int fd, uint64_t code_addr, const char *sym,
const void *code, int csize, void *debug, int nr_debug_entries,
@@ -76,6 +78,6 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#endif
/* The .text section is directly after the ELF header */
-#define GEN_ELF_TEXT_OFFSET sizeof(Elf_Ehdr)
+#define GEN_ELF_TEXT_OFFSET round_up(sizeof(Elf_Ehdr) + sizeof(Elf_Phdr), 16)
#endif
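
A worked example of the new offset: on a 64-bit target sizeof(Elf64_Ehdr) is 64 and sizeof(Elf64_Phdr) is 56, so .text now starts at round_up(120, 16) = 128 rather than 64, leaving room for one program header while keeping the code 16-byte aligned. A small sketch (round_up() here is a local stand-in for the linux/math.h macro):

	#include <elf.h>
	#include <stdio.h>

	/* Local stand-in for the kernel's round_up(). */
	#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		/* 64 + 56 = 120, rounded up to a multiple of 16 -> 128 */
		printf("text offset: %zu\n",
		       round_up(sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr),
				(size_t)16));
		return 0;
	}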
diff --git a/tools/perf/util/hisi-ptt-decoder/Build b/tools/perf/util/hisi-ptt-decoder/Build
new file mode 100644
index 000000000000..db3db8b75033
--- /dev/null
+++ b/tools/perf/util/hisi-ptt-decoder/Build
@@ -0,0 +1 @@
+perf-$(CONFIG_AUXTRACE) += hisi-ptt-pkt-decoder.o
diff --git a/tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.c b/tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.c
new file mode 100644
index 000000000000..a17c423a526d
--- /dev/null
+++ b/tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HiSilicon PCIe Trace and Tuning (PTT) support
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+#include <linux/bitops.h>
+#include <stdarg.h>
+
+#include "../color.h"
+#include "hisi-ptt-pkt-decoder.h"
+
+/*
+ * For 8DW format, the bit[31:11] of DW0 is always 0x1fffff, which can be
+ * used to distinguish the data format.
+ * 8DW format is like:
+ * bits [ 31:11 ][ 10:0 ]
+ * |---------------------------------------|-------------------|
+ * DW0 [ 0x1fffff ][ Reserved (0x7ff) ]
+ * DW1 [ Prefix ]
+ * DW2 [ Header DW0 ]
+ * DW3 [ Header DW1 ]
+ * DW4 [ Header DW2 ]
+ * DW5 [ Header DW3 ]
+ * DW6 [ Reserved (0x0) ]
+ * DW7 [ Time ]
+ *
+ * 4DW format is like:
+ * bits [31:30] [ 29:25 ][24][23][22][21][ 20:11 ][ 10:0 ]
+ * |-----|---------|---|---|---|---|-------------|-------------|
+ * DW0 [ Fmt ][ Type ][T9][T8][TH][SO][ Length ][ Time ]
+ * DW1 [ Header DW1 ]
+ * DW2 [ Header DW2 ]
+ * DW3 [ Header DW3 ]
+ */
+
+enum hisi_ptt_8dw_pkt_field_type {
+ HISI_PTT_8DW_CHK_AND_RSV0,
+ HISI_PTT_8DW_PREFIX,
+ HISI_PTT_8DW_HEAD0,
+ HISI_PTT_8DW_HEAD1,
+ HISI_PTT_8DW_HEAD2,
+ HISI_PTT_8DW_HEAD3,
+ HISI_PTT_8DW_RSV1,
+ HISI_PTT_8DW_TIME,
+ HISI_PTT_8DW_TYPE_MAX
+};
+
+enum hisi_ptt_4dw_pkt_field_type {
+ HISI_PTT_4DW_HEAD1,
+ HISI_PTT_4DW_HEAD2,
+ HISI_PTT_4DW_HEAD3,
+ HISI_PTT_4DW_TYPE_MAX
+};
+
+static const char * const hisi_ptt_8dw_pkt_field_name[] = {
+ [HISI_PTT_8DW_PREFIX] = "Prefix",
+ [HISI_PTT_8DW_HEAD0] = "Header DW0",
+ [HISI_PTT_8DW_HEAD1] = "Header DW1",
+ [HISI_PTT_8DW_HEAD2] = "Header DW2",
+ [HISI_PTT_8DW_HEAD3] = "Header DW3",
+ [HISI_PTT_8DW_TIME] = "Time"
+};
+
+static const char * const hisi_ptt_4dw_pkt_field_name[] = {
+ [HISI_PTT_4DW_HEAD1] = "Header DW1",
+ [HISI_PTT_4DW_HEAD2] = "Header DW2",
+ [HISI_PTT_4DW_HEAD3] = "Header DW3",
+};
+
+union hisi_ptt_4dw {
+ struct {
+ uint32_t format : 2;
+ uint32_t type : 5;
+ uint32_t t9 : 1;
+ uint32_t t8 : 1;
+ uint32_t th : 1;
+ uint32_t so : 1;
+ uint32_t len : 10;
+ uint32_t time : 11;
+ };
+ uint32_t value;
+};
+
+static void hisi_ptt_print_pkt(const unsigned char *buf, int pos, const char *desc)
+{
+ const char *color = PERF_COLOR_BLUE;
+ int i;
+
+ printf(".");
+ color_fprintf(stdout, color, " %08x: ", pos);
+ for (i = 0; i < HISI_PTT_FIELD_LENTH; i++)
+ color_fprintf(stdout, color, "%02x ", buf[pos + i]);
+ for (i = 0; i < HISI_PTT_MAX_SPACE_LEN; i++)
+ color_fprintf(stdout, color, " ");
+ color_fprintf(stdout, color, " %s\n", desc);
+}
+
+static int hisi_ptt_8dw_kpt_desc(const unsigned char *buf, int pos)
+{
+ int i;
+
+ for (i = 0; i < HISI_PTT_8DW_TYPE_MAX; i++) {
+ /* Do not show 8DW check field and reserved fields */
+ if (i == HISI_PTT_8DW_CHK_AND_RSV0 || i == HISI_PTT_8DW_RSV1) {
+ pos += HISI_PTT_FIELD_LENTH;
+ continue;
+ }
+
+ hisi_ptt_print_pkt(buf, pos, hisi_ptt_8dw_pkt_field_name[i]);
+ pos += HISI_PTT_FIELD_LENTH;
+ }
+
+ return hisi_ptt_pkt_size[HISI_PTT_8DW_PKT];
+}
+
+static void hisi_ptt_4dw_print_dw0(const unsigned char *buf, int pos)
+{
+ const char *color = PERF_COLOR_BLUE;
+ union hisi_ptt_4dw dw0;
+ int i;
+
+ dw0.value = *(uint32_t *)(buf + pos);
+ printf(".");
+ color_fprintf(stdout, color, " %08x: ", pos);
+ for (i = 0; i < HISI_PTT_FIELD_LENTH; i++)
+ color_fprintf(stdout, color, "%02x ", buf[pos + i]);
+ for (i = 0; i < HISI_PTT_MAX_SPACE_LEN; i++)
+ color_fprintf(stdout, color, " ");
+
+ color_fprintf(stdout, color,
+ " %s %x %s %x %s %x %s %x %s %x %s %x %s %x %s %x\n",
+ "Format", dw0.format, "Type", dw0.type, "T9", dw0.t9,
+ "T8", dw0.t8, "TH", dw0.th, "SO", dw0.so, "Length",
+ dw0.len, "Time", dw0.time);
+}
+
+static int hisi_ptt_4dw_kpt_desc(const unsigned char *buf, int pos)
+{
+ int i;
+
+ hisi_ptt_4dw_print_dw0(buf, pos);
+ pos += HISI_PTT_FIELD_LENTH;
+
+ for (i = 0; i < HISI_PTT_4DW_TYPE_MAX; i++) {
+ hisi_ptt_print_pkt(buf, pos, hisi_ptt_4dw_pkt_field_name[i]);
+ pos += HISI_PTT_FIELD_LENTH;
+ }
+
+ return hisi_ptt_pkt_size[HISI_PTT_4DW_PKT];
+}
+
+int hisi_ptt_pkt_desc(const unsigned char *buf, int pos, enum hisi_ptt_pkt_type type)
+{
+ if (type == HISI_PTT_8DW_PKT)
+ return hisi_ptt_8dw_kpt_desc(buf, pos);
+
+ return hisi_ptt_4dw_kpt_desc(buf, pos);
+}
diff --git a/tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.h b/tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.h
new file mode 100644
index 000000000000..e78f1b5bc836
--- /dev/null
+++ b/tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HiSilicon PCIe Trace and Tuning (PTT) support
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ */
+
+#ifndef INCLUDE__HISI_PTT_PKT_DECODER_H__
+#define INCLUDE__HISI_PTT_PKT_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define HISI_PTT_8DW_CHECK_MASK GENMASK(31, 11)
+#define HISI_PTT_IS_8DW_PKT GENMASK(31, 11)
+#define HISI_PTT_MAX_SPACE_LEN 10
+#define HISI_PTT_FIELD_LENTH 4
+
+enum hisi_ptt_pkt_type {
+ HISI_PTT_4DW_PKT,
+ HISI_PTT_8DW_PKT,
+ HISI_PTT_PKT_MAX
+};
+
+static int hisi_ptt_pkt_size[] = {
+ [HISI_PTT_4DW_PKT] = 16,
+ [HISI_PTT_8DW_PKT] = 32,
+};
+
+int hisi_ptt_pkt_desc(const unsigned char *buf, int pos, enum hisi_ptt_pkt_type type);
+
+#endif
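
The check mask in this header encodes the rule from the decoder's format comment: bits [31:11] of the first DW are all ones only in the 8DW format. A minimal, self-contained sketch of the same detection (the mask value is spelled out because GENMASK() comes from linux/bits.h):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define PTT_8DW_CHECK_MASK 0xfffff800u	/* GENMASK(31, 11) */

	static const char *ptt_pkt_type(const unsigned char *buf)
	{
		uint32_t dw0;

		memcpy(&dw0, buf, sizeof(dw0)); /* avoids unaligned access */
		return (dw0 & PTT_8DW_CHECK_MASK) == PTT_8DW_CHECK_MASK ?
		       "8DW" : "4DW";
	}

	int main(void)
	{
		unsigned char eight[4] = { 0xff, 0xff, 0xff, 0xff };
		unsigned char four[4]  = { 0x00, 0x00, 0x00, 0x40 };

		/* Prints "8DW 4DW" on a little-endian host */
		printf("%s %s\n", ptt_pkt_type(eight), ptt_pkt_type(four));
		return 0;
	}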
diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
new file mode 100644
index 000000000000..45b614bb73bf
--- /dev/null
+++ b/tools/perf/util/hisi-ptt.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HiSilicon PCIe Trace and Tuning (PTT) support
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <byteswap.h>
+#include <endian.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+#include <linux/zalloc.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "auxtrace.h"
+#include "color.h"
+#include "debug.h"
+#include "evsel.h"
+#include "hisi-ptt.h"
+#include "hisi-ptt-decoder/hisi-ptt-pkt-decoder.h"
+#include "machine.h"
+#include "session.h"
+#include "tool.h"
+#include <internal/lib.h>
+
+struct hisi_ptt {
+ struct auxtrace auxtrace;
+ u32 auxtrace_type;
+ struct perf_session *session;
+ struct machine *machine;
+ u32 pmu_type;
+};
+
+struct hisi_ptt_queue {
+ struct hisi_ptt *ptt;
+ struct auxtrace_buffer *buffer;
+};
+
+static enum hisi_ptt_pkt_type hisi_ptt_check_packet_type(unsigned char *buf)
+{
+ uint32_t head = *(uint32_t *)buf;
+
+ if ((HISI_PTT_8DW_CHECK_MASK & head) == HISI_PTT_IS_8DW_PKT)
+ return HISI_PTT_8DW_PKT;
+
+ return HISI_PTT_4DW_PKT;
+}
+
+static void hisi_ptt_dump(struct hisi_ptt *ptt __maybe_unused,
+ unsigned char *buf, size_t len)
+{
+ const char *color = PERF_COLOR_BLUE;
+ enum hisi_ptt_pkt_type type;
+ size_t pos = 0;
+ int pkt_len;
+
+ type = hisi_ptt_check_packet_type(buf);
+ len = round_down(len, hisi_ptt_pkt_size[type]);
+ color_fprintf(stdout, color, ". ... HISI PTT data: size %zu bytes\n",
+ len);
+
+ while (len > 0) {
+ pkt_len = hisi_ptt_pkt_desc(buf, pos, type);
+ if (!pkt_len) {
+ color_fprintf(stdout, color, " Bad packet!\n");
+ /* A zero-length packet would loop forever, so stop the dump. */
+ break;
+ }
+
+ pos += pkt_len;
+ len -= pkt_len;
+ }
+}
+
+static void hisi_ptt_dump_event(struct hisi_ptt *ptt, unsigned char *buf,
+ size_t len)
+{
+ printf(".\n");
+
+ hisi_ptt_dump(ptt, buf, len);
+}
+
+static int hisi_ptt_process_event(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool __maybe_unused)
+{
+ struct hisi_ptt *ptt = container_of(session->auxtrace, struct hisi_ptt,
+ auxtrace);
+ int fd = perf_data__fd(session->data);
+ int size = event->auxtrace.size;
+ void *data = malloc(size);
+ off_t data_offset;
+ int err;
+
+ if (!data)
+ return -errno;
+
+ if (perf_data__is_pipe(session->data)) {
+ data_offset = 0;
+ } else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1) {
+ free(data);
+ return -errno;
+ }
+ }
+
+ err = readn(fd, data, size);
+ if (err != (ssize_t)size) {
+ free(data);
+ return -errno;
+ }
+
+ if (dump_trace)
+ hisi_ptt_dump_event(ptt, data, size);
+
+ free(data);
+ return 0;
+}
+
+static int hisi_ptt_flush(struct perf_session *session __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static void hisi_ptt_free_events(struct perf_session *session __maybe_unused)
+{
+}
+
+static void hisi_ptt_free(struct perf_session *session)
+{
+ struct hisi_ptt *ptt = container_of(session->auxtrace, struct hisi_ptt,
+ auxtrace);
+
+ session->auxtrace = NULL;
+ free(ptt);
+}
+
+static bool hisi_ptt_evsel_is_auxtrace(struct perf_session *session,
+ struct evsel *evsel)
+{
+ struct hisi_ptt *ptt = container_of(session->auxtrace, struct hisi_ptt, auxtrace);
+
+ return evsel->core.attr.type == ptt->pmu_type;
+}
+
+static void hisi_ptt_print_info(__u64 type)
+{
+ if (!dump_trace)
+ return;
+
+ fprintf(stdout, " PMU Type %" PRId64 "\n", (s64) type);
+}
+
+int hisi_ptt_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
+ struct hisi_ptt *ptt;
+
+ if (auxtrace_info->header.size < HISI_PTT_AUXTRACE_PRIV_SIZE +
+ sizeof(struct perf_record_auxtrace_info))
+ return -EINVAL;
+
+ ptt = zalloc(sizeof(*ptt));
+ if (!ptt)
+ return -ENOMEM;
+
+ ptt->session = session;
+ ptt->machine = &session->machines.host; /* No kvm support */
+ ptt->auxtrace_type = auxtrace_info->type;
+ ptt->pmu_type = auxtrace_info->priv[0];
+
+ ptt->auxtrace.process_event = hisi_ptt_process_event;
+ ptt->auxtrace.process_auxtrace_event = hisi_ptt_process_auxtrace_event;
+ ptt->auxtrace.flush_events = hisi_ptt_flush;
+ ptt->auxtrace.free_events = hisi_ptt_free_events;
+ ptt->auxtrace.free = hisi_ptt_free;
+ ptt->auxtrace.evsel_is_auxtrace = hisi_ptt_evsel_is_auxtrace;
+ session->auxtrace = &ptt->auxtrace;
+
+ hisi_ptt_print_info(auxtrace_info->priv[0]);
+
+ return 0;
+}
diff --git a/tools/perf/util/hisi-ptt.h b/tools/perf/util/hisi-ptt.h
new file mode 100644
index 000000000000..2db9b4056214
--- /dev/null
+++ b/tools/perf/util/hisi-ptt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HiSilicon PCIe Trace and Tuning (PTT) support
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ */
+
+#ifndef INCLUDE__PERF_HISI_PTT_H__
+#define INCLUDE__PERF_HISI_PTT_H__
+
+#define HISI_PTT_PMU_NAME "hisi_ptt"
+#define HISI_PTT_AUXTRACE_PRIV_SIZE sizeof(u64)
+
+struct auxtrace_record *hisi_ptt_recording_init(int *err,
+ struct perf_pmu *hisi_ptt_pmu);
+
+int hisi_ptt_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+
+#endif
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index b34cb3dec1aa..e3548ddef254 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -4046,6 +4046,7 @@ static const char * const intel_pt_info_fmts[] = {
[INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n",
[INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n",
[INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n",
+ [INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n",
[INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n",
[INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n",
[INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n",
@@ -4060,8 +4061,12 @@ static void intel_pt_print_info(__u64 *arr, int start, int finish)
if (!dump_trace)
return;
- for (i = start; i <= finish; i++)
- fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
+ for (i = start; i <= finish; i++) {
+ const char *fmt = intel_pt_info_fmts[i];
+
+ if (fmt)
+ fprintf(stdout, fmt, arr[i]);
+ }
}
static void intel_pt_print_info_str(const char *name, const char *str)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 437389dacf48..5973f46c2375 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -246,6 +246,9 @@ __add_event(struct list_head *list, int *idx,
struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
+ if (pmu)
+ perf_pmu__warn_invalid_formats(pmu);
+
if (pmu && attr->type == PERF_TYPE_RAW)
perf_pmu__warn_invalid_config(pmu, attr->config, name);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 74a2cafb4e8d..03284059175f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1005,6 +1005,23 @@ err:
return NULL;
}
+void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
+{
+ struct perf_pmu_format *format;
+
+ /* fake pmu doesn't have format list */
+ if (pmu == &perf_pmu__fake)
+ return;
+
+ list_for_each_entry(format, &pmu->format, list)
+ if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) {
+ pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d'"
+ "which is not supported by this version of perf!\n",
+ pmu->name, format->name, format->value);
+ return;
+ }
+}
+
static struct perf_pmu *pmu_find(const char *name)
{
struct perf_pmu *pmu;
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index a7b0f9507510..68e15c38ae71 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -17,6 +17,7 @@ enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
PERF_PMU_FORMAT_VALUE_CONFIG1,
PERF_PMU_FORMAT_VALUE_CONFIG2,
+ PERF_PMU_FORMAT_VALUE_CONFIG_END,
};
#define PERF_PMU_FORMAT_BITS 64
@@ -139,6 +140,7 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu);
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
const char *name);
+void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
bool perf_pmu__has_hybrid(void);
int perf_pmu__match(char *pattern, char *name, char *tok);
diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l
index a15d9fbd7c0e..58b4926cfaca 100644
--- a/tools/perf/util/pmu.l
+++ b/tools/perf/util/pmu.l
@@ -27,8 +27,6 @@ num_dec [0-9]+
{num_dec} { return value(10); }
config { return PP_CONFIG; }
-config1 { return PP_CONFIG1; }
-config2 { return PP_CONFIG2; }
- { return '-'; }
: { return ':'; }
, { return ','; }
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index 0dab0ec2eff7..e675d79a0274 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -18,7 +18,7 @@ do { \
%}
-%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
+%token PP_CONFIG
%token PP_VALUE PP_ERROR
%type <num> PP_VALUE
%type <bits> bit_term
@@ -45,18 +45,11 @@ PP_CONFIG ':' bits
$3));
}
|
-PP_CONFIG1 ':' bits
+PP_CONFIG PP_VALUE ':' bits
{
ABORT_ON(perf_pmu__new_format(format, name,
- PERF_PMU_FORMAT_VALUE_CONFIG1,
- $3));
-}
-|
-PP_CONFIG2 ':' bits
-{
- ABORT_ON(perf_pmu__new_format(format, name,
- PERF_PMU_FORMAT_VALUE_CONFIG2,
- $3));
+ $2,
+ $4));
}
bits:
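
With the generalized rule above, the grammar no longer needs one token per config word: the lexer returns PP_CONFIG for the literal "config" and PP_VALUE for any decimal suffix, so every "config<N>" spelling parses, and perf_pmu__warn_invalid_formats() (added in the pmu.c hunk above) flags values at or beyond PERF_PMU_FORMAT_VALUE_CONFIG_END. Illustrative sysfs format-file lines this grammar now accepts (the "config3" entry is hypothetical and would trigger the new warning on this version of perf):

	config:0-7
	config1:1,6-10,44
	config2:0-63
	config3:0-63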
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 2a6b0bc648c4..69c58362c0ed 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -70,6 +70,7 @@ TEST_PROGS += io_uring_zerocopy_tx.sh
TEST_GEN_FILES += bind_bhash
TEST_GEN_PROGS += sk_bind_sendto_listen
TEST_GEN_PROGS += sk_connect_zero_addr
+TEST_PROGS += test_ingress_egress_chaining.sh
TEST_FILES := settings
diff --git a/tools/testing/selftests/net/test_ingress_egress_chaining.sh b/tools/testing/selftests/net/test_ingress_egress_chaining.sh
new file mode 100644
index 000000000000..08adff6bb3b6
--- /dev/null
+++ b/tools/testing/selftests/net/test_ingress_egress_chaining.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test runs a simple ingress tc setup between two veth pairs,
+# and chains a single egress rule to test ingress chaining to egress.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip
+fi
+
+needed_mods="act_mirred cls_flower sch_ingress"
+for mod in $needed_mods; do
+ modinfo $mod &>/dev/null || { echo "SKIP: Need $mod module"; exit $ksft_skip; }
+done
+
+ns="ns$((RANDOM%899+100))"
+veth1="veth1$((RANDOM%899+100))"
+veth2="veth2$((RANDOM%899+100))"
+peer1="peer1$((RANDOM%899+100))"
+peer2="peer2$((RANDOM%899+100))"
+ip_peer1=198.51.100.5
+ip_peer2=198.51.100.6
+
+function fail() {
+ echo "FAIL: $@" >> /dev/stderr
+ exit 1
+}
+
+function cleanup() {
+ killall -q -9 udpgso_bench_rx
+ ip link del $veth1 &> /dev/null
+ ip link del $veth2 &> /dev/null
+ ip netns del $ns &> /dev/null
+}
+trap cleanup EXIT
+
+function config() {
+ echo "Setup veth pairs [$veth1, $peer1], and veth pair [$veth2, $peer2]"
+ ip link add $veth1 type veth peer name $peer1
+ ip link add $veth2 type veth peer name $peer2
+ ip addr add $ip_peer1/24 dev $peer1
+ ip link set $peer1 up
+ ip netns add $ns
+ ip link set dev $peer2 netns $ns
+ ip netns exec $ns ip addr add $ip_peer2/24 dev $peer2
+ ip netns exec $ns ip link set $peer2 up
+ ip link set $veth1 up
+ ip link set $veth2 up
+
+ echo "Add tc filter ingress->egress forwarding $veth1 <-> $veth2"
+ tc qdisc add dev $veth2 ingress
+ tc qdisc add dev $veth1 ingress
+ tc filter add dev $veth2 ingress prio 1 proto all flower \
+ action mirred egress redirect dev $veth1
+ tc filter add dev $veth1 ingress prio 1 proto all flower \
+ action mirred egress redirect dev $veth2
+
+ echo "Add tc filter egress->ingress forwarding $peer1 -> $veth1, bypassing the veth pipe"
+ tc qdisc add dev $peer1 clsact
+ tc filter add dev $peer1 egress prio 20 proto ip flower \
+ action mirred ingress redirect dev $veth1
+}
+
+function test_run() {
+ echo "Run tcp traffic"
+ ./udpgso_bench_rx -t &
+ sleep 1
+ ip netns exec $ns timeout -k 2 10 ./udpgso_bench_tx -t -l 2 -4 -D $ip_peer1 || fail "traffic failed"
+ echo "Test passed"
+}
+
+config
+test_run
+trap - EXIT
+cleanup
diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
index 7d722265dcd7..4adaad1b822f 100644
--- a/tools/testing/selftests/vm/hmm-tests.c
+++ b/tools/testing/selftests/vm/hmm-tests.c
@@ -1054,6 +1054,55 @@ TEST_F(hmm, migrate_fault)
hmm_buffer_free(buffer);
}
+TEST_F(hmm, migrate_release)
+{
+ struct hmm_buffer *buffer;
+ unsigned long npages;
+ unsigned long size;
+ unsigned long i;
+ int *ptr;
+ int ret;
+
+ npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
+ ASSERT_NE(npages, 0);
+ size = npages << self->page_shift;
+
+ buffer = malloc(sizeof(*buffer));
+ ASSERT_NE(buffer, NULL);
+
+ buffer->fd = -1;
+ buffer->size = size;
+ buffer->mirror = malloc(size);
+ ASSERT_NE(buffer->mirror, NULL);
+
+ buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
+ ASSERT_NE(buffer->ptr, MAP_FAILED);
+
+ /* Initialize buffer in system memory. */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ptr[i] = i;
+
+ /* Migrate memory to device. */
+ ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
+ ASSERT_EQ(ret, 0);
+ ASSERT_EQ(buffer->cpages, npages);
+
+ /* Check what the device read. */
+ for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ /* Release device memory. */
+ ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages);
+ ASSERT_EQ(ret, 0);
+
+ /* Fault pages back to system memory and check them. */
+ for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
+ ASSERT_EQ(ptr[i], i);
+
+ hmm_buffer_free(buffer);
+}
+
/*
* Migrate anonymous shared memory to device private memory.
*/
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 74babdbc02e5..297f250c1d95 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -774,7 +774,27 @@ static void uffd_handle_page_fault(struct uffd_msg *msg,
continue_range(uffd, msg->arg.pagefault.address, page_size);
stats->minor_faults++;
} else {
- /* Missing page faults */
+ /*
+ * Missing page faults.
+ *
+ * Here we force a write check for each of the missing mode
+ * faults. It's guaranteed because the only threads that
+ * will trigger uffd faults are the locking threads, and
+ * their first instruction to touch the missing page will
+ * always be pthread_mutex_lock().
+ *
+ * Note that we rely on an NPTL glibc implementation detail
+ * here: the lock type is always read at the entry of the
+ * lock op (pthread_mutex_t.__data.__type, offset 0x10)
+ * before any locking operation, which guarantees the above.
+ * Relying on this detail is not ideal, because logically a
+ * pthread-compatible library could implement the locks
+ * without types, and then this check could fail when
+ * linking against it. However, since this strict check has
+ * found bugs before, we keep it around; hopefully it will
+ * be a good hint when it fails again. If one day it breaks
+ * on some other implementation of glibc, we'll revisit.
+ */
if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
err("unexpected write fault");