Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/ivpu/ivpu_debugfs.c | 20
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.c | 131
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.h | 5
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.c | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.c | 142
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.h | 3
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_37xx.c | 60
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_40xx.c | 38
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.c | 6
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 162
-rw-r--r-- drivers/accel/ivpu/ivpu_job.h | 3
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.c | 58
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.h | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.c | 9
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 91
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.h | 6
-rw-r--r-- drivers/acpi/apei/ghes.c | 89
-rw-r--r-- drivers/acpi/ec.c | 112
-rw-r--r-- drivers/android/binder.c | 10
-rw-r--r-- drivers/ata/ahci.c | 52
-rw-r--r-- drivers/ata/ahci.h | 1
-rw-r--r-- drivers/ata/ahci_ceva.c | 125
-rw-r--r-- drivers/ata/libata-core.c | 59
-rw-r--r-- drivers/ata/libata-sata.c | 2
-rw-r--r-- drivers/atm/idt77252.c | 2
-rw-r--r-- drivers/base/arch_topology.c | 13
-rw-r--r-- drivers/base/core.c | 26
-rw-r--r-- drivers/base/regmap/regmap-kunit.c | 57
-rw-r--r-- drivers/block/aoe/aoeblk.c | 5
-rw-r--r-- drivers/block/rbd.c | 32
-rw-r--r-- drivers/block/virtio_blk.c | 7
-rw-r--r-- drivers/bluetooth/btqca.c | 2
-rw-r--r-- drivers/bluetooth/hci_bcm4377.c | 3
-rw-r--r-- drivers/bluetooth/hci_qca.c | 22
-rw-r--r-- drivers/bus/imx-weim.c | 2
-rw-r--r-- drivers/cache/ax45mp_cache.c | 4
-rw-r--r-- drivers/clk/samsung/clk-gs101.c | 2
-rw-r--r-- drivers/connector/cn_proc.c | 5
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 7
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 58
-rw-r--r-- drivers/cxl/acpi.c | 46
-rw-r--r-- drivers/cxl/core/cdat.c | 86
-rw-r--r-- drivers/cxl/core/mbox.c | 4
-rw-r--r-- drivers/cxl/core/memdev.c | 63
-rw-r--r-- drivers/cxl/core/pci.c | 49
-rw-r--r-- drivers/cxl/core/region.c | 66
-rw-r--r-- drivers/cxl/core/trace.h | 6
-rw-r--r-- drivers/cxl/cxl.h | 2
-rw-r--r-- drivers/cxl/cxlmem.h | 10
-rw-r--r-- drivers/cxl/mem.c | 56
-rw-r--r-- drivers/cxl/pci.c | 83
-rw-r--r-- drivers/dma-buf/heaps/cma_heap.c | 7
-rw-r--r-- drivers/dma/at_hdmac.c | 32
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-core.c | 17
-rw-r--r-- drivers/dma/dw-edma/dw-hdma-v0-core.c | 39
-rw-r--r-- drivers/dma/dw-edma/dw-hdma-v0-regs.h | 2
-rw-r--r-- drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c | 10
-rw-r--r-- drivers/dma/fsl-edma-common.c | 2
-rw-r--r-- drivers/dma/fsl-edma-common.h | 5
-rw-r--r-- drivers/dma/fsl-edma-main.c | 4
-rw-r--r-- drivers/dma/fsl-qdma.c | 71
-rw-r--r-- drivers/dma/idxd/cdev.c | 2
-rw-r--r-- drivers/dma/idxd/debugfs.c | 2
-rw-r--r-- drivers/dma/idxd/idxd.h | 1
-rw-r--r-- drivers/dma/idxd/init.c | 15
-rw-r--r-- drivers/dma/idxd/irq.c | 3
-rw-r--r-- drivers/dma/ptdma/ptdma-dmaengine.c | 2
-rw-r--r-- drivers/dma/ti/edma.c | 10
-rw-r--r-- drivers/dma/ti/k3-udma.c | 10
-rw-r--r-- drivers/dpll/dpll_core.c | 75
-rw-r--r-- drivers/dpll/dpll_core.h | 6
-rw-r--r-- drivers/dpll/dpll_netlink.c | 77
-rw-r--r-- drivers/dpll/dpll_nl.c | 4
-rw-r--r-- drivers/dpll/dpll_nl.h | 2
-rw-r--r-- drivers/firewire/core-card.c | 32
-rw-r--r-- drivers/firewire/core-device.c | 18
-rw-r--r-- drivers/firmware/arm_ffa/driver.c | 85
-rw-r--r-- drivers/firmware/arm_scmi/clock.c | 5
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 1
-rw-r--r-- drivers/firmware/arm_scmi/mailbox.c | 14
-rw-r--r-- drivers/firmware/arm_scmi/perf.c | 23
-rw-r--r-- drivers/firmware/arm_scmi/raw_mode.c | 12
-rw-r--r-- drivers/firmware/arm_scmi/shmem.c | 8
-rw-r--r-- drivers/firmware/efi/arm-runtime.c | 2
-rw-r--r-- drivers/firmware/efi/capsule-loader.c | 2
-rw-r--r-- drivers/firmware/efi/cper.c | 19
-rw-r--r-- drivers/firmware/efi/efi-init.c | 19
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 4
-rw-r--r-- drivers/firmware/efi/libstub/alignedmem.c | 1
-rw-r--r-- drivers/firmware/efi/libstub/efistub.h | 3
-rw-r--r-- drivers/firmware/efi/libstub/kaslr.c | 2
-rw-r--r-- drivers/firmware/efi/libstub/randomalloc.c | 12
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.c | 25
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.h | 4
-rw-r--r-- drivers/firmware/efi/libstub/zboot.c | 2
-rw-r--r-- drivers/firmware/efi/riscv-runtime.c | 2
-rw-r--r-- drivers/firmware/microchip/mpfs-auto-update.c | 2
-rw-r--r-- drivers/firmware/sysfb.c | 2
-rw-r--r-- drivers/gpio/gpio-74x164.c | 4
-rw-r--r-- drivers/gpio/gpio-eic-sprd.c | 32
-rw-r--r-- drivers/gpio/gpiolib-acpi.c | 14
-rw-r--r-- drivers/gpio/gpiolib.c | 23
-rw-r--r-- drivers/gpu/drm/Kconfig | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cz_ih.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc21.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 14
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 105
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 47
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 63
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 76
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_factory.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_validation.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 47
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/modules/power/power_helpers.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/amd_shared.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/amdgpu_reg_state.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 29
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 47
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 9
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 73
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 71
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c | 7
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.h | 2
-rw-r--r-- drivers/gpu/drm/bridge/aux-hpd-bridge.c | 70
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8640.c | 23
-rw-r--r-- drivers/gpu/drm/bridge/samsung-dsim.c | 32
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 42
-rw-r--r-- drivers/gpu/drm/display/drm_dp_mst_topology.c | 2
-rw-r--r-- drivers/gpu/drm/drm_buddy.c | 22
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 2
-rw-r--r-- drivers/gpu/drm/drm_syncobj.c | 19
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 1
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc_regs.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_cvbs.c | 1
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_dsi.c | 1
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 3
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 5
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 20
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.c | 22
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_reg.h | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 11
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 32
-rw-r--r-- drivers/gpu/drm/msm/msm_mdss.c | 1
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/Kconfig | 8
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 29
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_exec.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 48
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sched.c | 38
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sched.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_svm.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 85
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 4
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 6
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 26
-rw-r--r-- drivers/gpu/drm/tests/drm_buddy_test.c | 306
-rw-r--r-- drivers/gpu/drm/tests/drm_mm_test.c | 5
-rw-r--r-- drivers/gpu/drm/ttm/ttm_device.c | 21
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c | 2
-rw-r--r-- drivers/gpu/drm/v3d/v3d_submit.c | 35
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 1
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_actions_abi.h | 4
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h | 4
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h | 8
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 6
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_messages_abi.h | 20
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h | 13
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_mocs_test.c | 1
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_wa_test.c | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_device.c | 41
-rw-r--r-- drivers/gpu/drm/xe/xe_device.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_device_types.h | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_display.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_dma_buf.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_drm_client.c | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_exec.c | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue.c | 129
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue_types.h | 20
-rw-r--r-- drivers/gpu/drm/xe/xe_execlist.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.c | 5
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_idle.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_mcr.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_pagefault.c | 13
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_pc.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_submit.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_hw_fence.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_hwmon.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_lrc.c | 26
-rw-r--r-- drivers/gpu/drm/xe/xe_migrate.c | 42
-rw-r--r-- drivers/gpu/drm/xe/xe_mmio.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_pt.c | 82
-rw-r--r-- drivers/gpu/drm/xe/xe_pt_walk.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_pt_walk.h | 19
-rw-r--r-- drivers/gpu/drm/xe/xe_query.c | 50
-rw-r--r-- drivers/gpu/drm/xe/xe_range_fence.c | 7
-rw-r--r-- drivers/gpu/drm/xe/xe_sched_job.c | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_sync.c | 60
-rw-r--r-- drivers/gpu/drm/xe/xe_sync.h | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_sync_types.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_trace.h | 59
-rw-r--r-- drivers/gpu/drm/xe/xe_vm.c | 353
-rw-r--r-- drivers/gpu/drm/xe/xe_vm.h | 16
-rw-r--r-- drivers/gpu/drm/xe/xe_vm_types.h | 33
-rw-r--r-- drivers/gpu/host1x/dev.c | 15
-rw-r--r-- drivers/gpu/host1x/dev.h | 6
-rw-r--r-- drivers/hid/bpf/hid_bpf_dispatch.c | 101
-rw-r--r-- drivers/hid/bpf/hid_bpf_dispatch.h | 4
-rw-r--r-- drivers/hid/bpf/hid_bpf_jmp_table.c | 40
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 15
-rw-r--r-- drivers/hid/hid-multitouch.c | 4
-rw-r--r-- drivers/hid/hid-nvidia-shield.c | 4
-rw-r--r-- drivers/hid/hid-steam.c | 36
-rw-r--r-- drivers/hid/hidraw.c | 7
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-core.c | 6
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid-of.c | 1
-rw-r--r-- drivers/hid/intel-ish-hid/ishtp/bus.c | 2
-rw-r--r-- drivers/hid/intel-ish-hid/ishtp/client.c | 4
-rw-r--r-- drivers/hid/wacom_sys.c | 63
-rw-r--r-- drivers/hid/wacom_wac.c | 9
-rw-r--r-- drivers/hv/channel.c | 176
-rw-r--r-- drivers/hv/hv_util.c | 31
-rw-r--r-- drivers/hv/vmbus_drv.c | 2
-rw-r--r-- drivers/hwmon/aspeed-pwm-tacho.c | 7
-rw-r--r-- drivers/hwmon/coretemp.c | 42
-rw-r--r-- drivers/hwmon/gigabyte_waterforce.c | 2
-rw-r--r-- drivers/hwmon/nct6775-core.c | 14
-rw-r--r-- drivers/hwmon/pmbus/mp2975.c | 16
-rw-r--r-- drivers/i2c/busses/Makefile | 6
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 5
-rw-r--r-- drivers/i2c/busses/i2c-pasemi-core.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-qcom-geni.c | 14
-rw-r--r-- drivers/iio/accel/Kconfig | 2
-rw-r--r-- drivers/iio/adc/ad4130.c | 12
-rw-r--r-- drivers/iio/adc/ad7091r8.c | 2
-rw-r--r-- drivers/iio/humidity/Kconfig | 12
-rw-r--r-- drivers/iio/humidity/Makefile | 1
-rw-r--r-- drivers/iio/humidity/hdc3020.c | 2
-rw-r--r-- drivers/iio/imu/bno055/Kconfig | 1
-rw-r--r-- drivers/iio/industrialio-core.c | 5
-rw-r--r-- drivers/iio/light/hid-sensor-als.c | 1
-rw-r--r-- drivers/iio/magnetometer/rm3100-core.c | 10
-rw-r--r-- drivers/iio/pressure/bmp280-spi.c | 1
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 43
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 3
-rw-r--r-- drivers/infiniband/hw/hfi1/pio.c | 6
-rw-r--r-- drivers/infiniband/hw/hfi1/sdma.c | 2
-rw-r--r-- drivers/infiniband/hw/irdma/defs.h | 1
-rw-r--r-- drivers/infiniband/hw/irdma/hw.c | 8
-rw-r--r-- drivers/infiniband/hw/irdma/verbs.c | 9
-rw-r--r-- drivers/infiniband/hw/mlx5/cong.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/wr.c | 2
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 11
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 17
-rw-r--r-- drivers/input/joystick/xpad.c | 2
-rw-r--r-- drivers/input/keyboard/atkbd.c | 14
-rw-r--r-- drivers/input/mouse/bcm5974.c | 20
-rw-r--r-- drivers/input/serio/i8042-acpipnpio.h | 14
-rw-r--r-- drivers/input/touchscreen/goodix.c | 3
-rw-r--r-- drivers/interconnect/qcom/sc8180x.c | 1
-rw-r--r-- drivers/interconnect/qcom/sm8550.c | 1
-rw-r--r-- drivers/interconnect/qcom/sm8650.c | 2
-rw-r--r-- drivers/interconnect/qcom/x1e80100.c | 1
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 38
-rw-r--r-- drivers/iommu/arm/arm-smmu/arm-smmu.c | 17
-rw-r--r-- drivers/iommu/intel/iommu.c | 214
-rw-r--r-- drivers/iommu/intel/iommu.h | 7
-rw-r--r-- drivers/iommu/intel/nested.c | 16
-rw-r--r-- drivers/iommu/intel/pasid.c | 5
-rw-r--r-- drivers/iommu/intel/pasid.h | 1
-rw-r--r-- drivers/iommu/iommu-sva.c | 17
-rw-r--r-- drivers/iommu/iommu.c | 18
-rw-r--r-- drivers/iommu/iommufd/hw_pagetable.c | 3
-rw-r--r-- drivers/iommu/iommufd/io_pagetable.c | 9
-rw-r--r-- drivers/iommu/iommufd/iommufd_test.h | 1
-rw-r--r-- drivers/iommu/iommufd/iova_bitmap.c | 68
-rw-r--r-- drivers/iommu/iommufd/selftest.c | 148
-rw-r--r-- drivers/irqchip/irq-brcmstb-l2.c | 5
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 64
-rw-r--r-- drivers/irqchip/irq-loongson-eiointc.c | 2
-rw-r--r-- drivers/irqchip/irq-mbigen.c | 8
-rw-r--r-- drivers/irqchip/irq-qcom-mpm.c | 4
-rw-r--r-- drivers/irqchip/irq-sifive-plic.c | 8
-rw-r--r-- drivers/md/dm-core.h | 2
-rw-r--r-- drivers/md/dm-crypt.c | 137
-rw-r--r-- drivers/md/dm-integrity.c | 93
-rw-r--r-- drivers/md/dm-ioctl.c | 3
-rw-r--r-- drivers/md/dm-stats.c | 9
-rw-r--r-- drivers/md/dm-table.c | 9
-rw-r--r-- drivers/md/dm-verity-target.c | 114
-rw-r--r-- drivers/md/dm-verity.h | 11
-rw-r--r-- drivers/md/dm-writecache.c | 8
-rw-r--r-- drivers/md/md.c | 70
-rw-r--r-- drivers/md/raid1.c | 2
-rw-r--r-- drivers/md/raid10.c | 16
-rw-r--r-- drivers/md/raid5.c | 29
-rw-r--r-- drivers/media/common/videobuf2/videobuf2-core.c | 2
-rw-r--r-- drivers/media/common/videobuf2/videobuf2-v4l2.c | 55
-rw-r--r-- drivers/media/platform/chips-media/wave5/wave5-vpu.c | 2
-rw-r--r-- drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c | 3
-rw-r--r-- drivers/media/platform/rockchip/rkisp1/rkisp1-common.h | 2
-rw-r--r-- drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c | 3
-rw-r--r-- drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c | 24
-rw-r--r-- drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c | 3
-rw-r--r-- drivers/media/rc/Kconfig | 1
-rw-r--r-- drivers/media/rc/bpf-lirc.c | 6
-rw-r--r-- drivers/media/rc/ir_toy.c | 2
-rw-r--r-- drivers/media/rc/lirc_dev.c | 5
-rw-r--r-- drivers/media/rc/rc-core-priv.h | 2
-rw-r--r-- drivers/misc/fastrpc.c | 2
-rw-r--r-- drivers/misc/open-dice.c | 2
-rw-r--r-- drivers/mmc/core/mmc.c | 2
-rw-r--r-- drivers/mmc/core/slot-gpio.c | 6
-rw-r--r-- drivers/mmc/host/mmci_stm32_sdmmc.c | 24
-rw-r--r-- drivers/mmc/host/sdhci-pci-o2micro.c | 30
-rw-r--r-- drivers/mmc/host/sdhci-xenon-phy.c | 48
-rw-r--r-- drivers/mtd/mtdcore.c | 1
-rw-r--r-- drivers/mtd/nand/raw/marvell_nand.c | 13
-rw-r--r-- drivers/mtd/nand/spi/gigadevice.c | 6
-rw-r--r-- drivers/net/arcnet/arc-rawmode.c | 1
-rw-r--r-- drivers/net/arcnet/arc-rimi.c | 1
-rw-r--r-- drivers/net/arcnet/capmode.c | 1
-rw-r--r-- drivers/net/arcnet/com20020-pci.c | 1
-rw-r--r-- drivers/net/arcnet/com20020.c | 1
-rw-r--r-- drivers/net/arcnet/com20020_cs.c | 1
-rw-r--r-- drivers/net/arcnet/com90io.c | 1
-rw-r--r-- drivers/net/arcnet/com90xx.c | 1
-rw-r--r-- drivers/net/arcnet/rfc1051.c | 1
-rw-r--r-- drivers/net/arcnet/rfc1201.c | 1
-rw-r--r-- drivers/net/bonding/bond_main.c | 5
-rw-r--r-- drivers/net/can/dev/netlink.c | 2
-rw-r--r-- drivers/net/dsa/dsa_loop_bdinfo.c | 1
-rw-r--r-- drivers/net/dsa/mt7530.c | 3
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r-- drivers/net/dsa/qca/qca8k-8xxx.c | 3
-rw-r--r-- drivers/net/ethernet/8390/8390.c | 1
-rw-r--r-- drivers/net/ethernet/8390/8390p.c | 1
-rw-r--r-- drivers/net/ethernet/8390/apne.c | 1
-rw-r--r-- drivers/net/ethernet/8390/hydra.c | 1
-rw-r--r-- drivers/net/ethernet/8390/stnic.c | 1
-rw-r--r-- drivers/net/ethernet/8390/zorro8390.c | 1
-rw-r--r-- drivers/net/ethernet/adi/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/amd/pds_core/adminq.c | 64
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.c | 46
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.h | 2
-rw-r--r-- drivers/net/ethernet/amd/pds_core/debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/amd/pds_core/dev.c | 16
-rw-r--r-- drivers/net/ethernet/amd/pds_core/devlink.c | 3
-rw-r--r-- drivers/net/ethernet/amd/pds_core/fw.c | 3
-rw-r--r-- drivers/net/ethernet/amd/pds_core/main.c | 32
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ptp.c | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 13
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bcm4908_enet.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 49
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 12
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_core.c | 1
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 1
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_vic.c | 3
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 33
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_memac.c | 18
-rw-r--r-- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 1
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx.c | 8
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 20
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 22
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 71
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 38
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 47
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.c | 91
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.c | 25
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 86
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_osdep.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 31
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 4
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 2
-rw-r--r-- drivers/net/ethernet/intel/idpf/virtchnl2.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 35
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 5
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_phy.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 3
-rw-r--r-- drivers/net/ethernet/litex/litex_liteeth.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 27
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 67
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 14
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 7
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dpll.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 21
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_lag.c | 9
-rw-r--r-- drivers/net/ethernet/microchip/lan966x/lan966x_port.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_main.h | 1
-rw-r--r-- drivers/net/ethernet/microchip/sparx5/sparx5_packet.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 46
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c | 6
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 19
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.c | 10
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_fw.c | 5
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 5
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_main.c | 3
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 9
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 22
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 57
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 58
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 15
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 129
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 166
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 3
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 17
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 12
-rw-r--r-- drivers/net/fddi/skfp/skfddi.c | 1
-rw-r--r-- drivers/net/fjes/fjes_hw.c | 37
-rw-r--r-- drivers/net/gtp.c | 14
-rw-r--r-- drivers/net/hyperv/netvsc.c | 5
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 86
-rw-r--r-- drivers/net/ieee802154/fakelb.c | 1
-rw-r--r-- drivers/net/ipa/ipa_interrupt.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvtap.c | 1
-rw-r--r-- drivers/net/macsec.c | 25
-rw-r--r-- drivers/net/netdevsim/dev.c | 8
-rw-r--r-- drivers/net/phy/mdio_devres.c | 1
-rw-r--r-- drivers/net/phy/mediatek-ge-soc.c | 147
-rw-r--r-- drivers/net/phy/micrel.c | 11
-rw-r--r-- drivers/net/phy/realtek.c | 4
-rw-r--r-- drivers/net/plip/plip.c | 1
-rw-r--r-- drivers/net/ppp/bsd_comp.c | 1
-rw-r--r-- drivers/net/ppp/ppp_async.c | 5
-rw-r--r-- drivers/net/ppp/ppp_deflate.c | 1
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 1
-rw-r--r-- drivers/net/ppp/ppp_synctty.c | 1
-rw-r--r-- drivers/net/ppp/pppoe.c | 23
-rw-r--r-- drivers/net/tun.c | 11
-rw-r--r-- drivers/net/usb/dm9601.c | 2
-rw-r--r-- drivers/net/usb/lan78xx.c | 5
-rw-r--r-- drivers/net/usb/smsc95xx.c | 5
-rw-r--r-- drivers/net/veth.c | 40
-rw-r--r-- drivers/net/wireless/ath/ar5523/ar5523.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/core.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath11k/debugfs.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath11k/debugfs.h | 12
-rw-r--r-- drivers/net/wireless/ath/ath11k/mac.c | 12
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/main.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 15
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 69
-rw-r--r-- drivers/net/wireless/intersil/p54/fwio.c | 2
-rw-r--r-- drivers/net/wireless/intersil/p54/p54spi.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/mmio.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/sdio.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/usb.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/pci.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/sdio.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/usb.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/main.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/pci.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/usb.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt792x_core.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt792x_usb.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mmio.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/sdio.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/usb.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/util.c | 1
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/netdev.c | 1
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/sdio.c | 1
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/spi.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl1251/sdio.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl1251/spi.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl12xx/main.c | 1
-rw-r--r-- drivers/net/wireless/ti/wl18xx/main.c | 1
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 1
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 1
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 1
-rw-r--r-- drivers/net/xen-netback/netback.c | 107
-rw-r--r-- drivers/nvme/common/auth.c | 1
-rw-r--r-- drivers/nvme/common/keyring.c | 1
-rw-r--r-- drivers/nvme/host/apple.c | 13
-rw-r--r-- drivers/nvme/host/auth.c | 19
-rw-r--r-- drivers/nvme/host/constants.c | 10
-rw-r--r-- drivers/nvme/host/core.c | 95
-rw-r--r-- drivers/nvme/host/fabrics.c | 20
-rw-r--r-- drivers/nvme/host/fabrics.h | 8
-rw-r--r-- drivers/nvme/host/fc.c | 60
-rw-r--r-- drivers/nvme/host/ioctl.c | 2
-rw-r--r-- drivers/nvme/host/multipath.c | 15
-rw-r--r-- drivers/nvme/host/nvme.h | 62
-rw-r--r-- drivers/nvme/host/pci.c | 3
-rw-r--r-- drivers/nvme/host/rdma.c | 23
-rw-r--r-- drivers/nvme/host/sysfs.c | 64
-rw-r--r-- drivers/nvme/host/tcp.c | 11
-rw-r--r-- drivers/nvme/target/core.c | 5
-rw-r--r-- drivers/nvme/target/discovery.c | 2
-rw-r--r-- drivers/nvme/target/fabrics-cmd.c | 4
-rw-r--r-- drivers/nvme/target/fc.c | 186
-rw-r--r-- drivers/nvme/target/fcloop.c | 7
-rw-r--r-- drivers/nvme/target/loop.c | 9
-rw-r--r-- drivers/nvme/target/rdma.c | 1
-rw-r--r-- drivers/nvme/target/tcp.c | 2
-rw-r--r-- drivers/nvmem/core.c | 5
-rw-r--r-- drivers/of/property.c | 65
-rw-r--r-- drivers/of/unittest.c | 12
-rw-r--r-- drivers/pci/bus.c | 49
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 10
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 2
-rw-r--r-- drivers/pci/msi/irqdomain.c | 2
-rw-r--r-- drivers/pci/pci.c | 115
-rw-r--r-- drivers/pci/pci.h | 4
-rw-r--r-- drivers/pci/pcie/aspm.c | 13
-rw-r--r-- drivers/perf/arm-cmn.c | 11
-rw-r--r-- drivers/perf/cxl_pmu.c | 12
-rw-r--r-- drivers/perf/riscv_pmu.c | 18
-rw-r--r-- drivers/perf/riscv_pmu_legacy.c | 10
-rw-r--r-- drivers/perf/riscv_pmu_sbi.c | 8
-rw-r--r-- drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c | 2
-rw-r--r-- drivers/phy/microchip/lan966x_serdes.c | 2
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c | 166
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-m31.c | 2
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-usb.c | 32
-rw-r--r-- drivers/phy/renesas/phy-rcar-gen3-usb2.c | 4
-rw-r--r-- drivers/phy/ti/phy-omap-usb2.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-amd.c | 2
-rw-r--r-- drivers/platform/mellanox/mlxbf-pmc.c | 4
-rw-r--r-- drivers/platform/mellanox/mlxbf-tmfifo.c | 67
-rw-r--r-- drivers/platform/x86/amd/pmf/Kconfig | 1
-rw-r--r-- drivers/platform/x86/amd/pmf/core.c | 17
-rw-r--r-- drivers/platform/x86/amd/pmf/pmf.h | 5
-rw-r--r-- drivers/platform/x86/amd/pmf/spc.c | 36
-rw-r--r-- drivers/platform/x86/amd/pmf/tee-if.c | 82
-rw-r--r-- drivers/platform/x86/intel/ifs/load.c | 3
-rw-r--r-- drivers/platform/x86/intel/int0002_vgpio.c | 2
-rw-r--r-- drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c | 82
-rw-r--r-- drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h | 32
-rw-r--r-- drivers/platform/x86/intel/vbtn.c | 3
-rw-r--r-- drivers/platform/x86/intel/wmi/sbl-fw-update.c | 4
-rw-r--r-- drivers/platform/x86/p2sb.c | 177
-rw-r--r-- drivers/platform/x86/serdev_helpers.h | 80
-rw-r--r-- drivers/platform/x86/think-lmi.c | 20
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 5
-rw-r--r-- drivers/platform/x86/touchscreen_dmi.c | 64
-rw-r--r-- drivers/platform/x86/wmi.c | 181
-rw-r--r-- drivers/platform/x86/x86-android-tablets/core.c | 38
-rw-r--r-- drivers/platform/x86/x86-android-tablets/lenovo.c | 1
-rw-r--r-- drivers/platform/x86/x86-android-tablets/other.c | 4
-rw-r--r-- drivers/platform/x86/x86-android-tablets/x86-android-tablets.h | 1
-rw-r--r-- drivers/pmdomain/arm/scmi_perf_domain.c | 3
-rw-r--r-- drivers/pmdomain/core.c | 2
-rw-r--r-- drivers/pmdomain/mediatek/mtk-pm-domains.c | 15
-rw-r--r-- drivers/pmdomain/qcom/rpmhpd.c | 7
-rw-r--r-- drivers/pmdomain/renesas/r8a77980-sysc.c | 3
-rw-r--r-- drivers/power/supply/Kconfig | 1
-rw-r--r-- drivers/power/supply/bq27xxx_battery_i2c.c | 4
-rw-r--r-- drivers/power/supply/qcom_battmgr.c | 109
-rw-r--r-- drivers/regulator/max5970-regulator.c | 10
-rw-r--r-- drivers/regulator/pwm-regulator.c | 43
-rw-r--r-- drivers/regulator/ti-abb-regulator.c | 22
-rw-r--r-- drivers/s390/cio/device_ops.c | 6
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 9
-rw-r--r-- drivers/scsi/Kconfig | 2
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c | 20
-rw-r--r-- drivers/scsi/fnic/fnic.h | 3
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 5
-rw-r--r-- drivers/scsi/fnic/fnic_main.c | 1
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 4
-rw-r--r-- drivers/scsi/initio.c | 3
-rw-r--r-- drivers/scsi/isci/request.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 12
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_transport.c | 7
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 4
-rw-r--r-- drivers/scsi/scsi.c | 22
-rw-r--r-- drivers/scsi/scsi_error.c | 9
-rw-r--r-- drivers/scsi/scsi_lib.c | 4
-rw-r--r-- drivers/scsi/scsi_priv.h | 2
-rw-r--r-- drivers/scsi/sd.c | 26
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 5
-rw-r--r-- drivers/scsi/storvsc_drv.c | 12
-rw-r--r-- drivers/scsi/virtio_scsi.c | 2
-rw-r--r-- drivers/soc/apple/mailbox.c | 6
-rw-r--r-- drivers/soc/microchip/Kconfig | 2
-rw-r--r-- drivers/soc/qcom/pmic_glink.c | 21
-rw-r--r-- drivers/soc/qcom/pmic_glink_altmode.c | 16
-rw-r--r-- drivers/spi/spi-bcm-qspi.c | 4
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 33
-rw-r--r-- drivers/spi/spi-cadence.c | 17
-rw-r--r-- drivers/spi/spi-cs42l43.c | 5
-rw-r--r-- drivers/spi/spi-hisi-sfc-v3xx.c | 5
-rw-r--r-- drivers/spi/spi-imx.c | 13
-rw-r--r-- drivers/spi/spi-intel-pci.c | 3
-rw-r--r-- drivers/spi/spi-mxs.c | 3
-rw-r--r-- drivers/spi/spi-omap2-mcspi.c | 137
-rw-r--r-- drivers/spi/spi-ppc4xx.c | 7
-rw-r--r-- drivers/spi/spi-sh-msiof.c | 16
-rw-r--r-- drivers/spi/spi.c | 4
-rw-r--r-- drivers/staging/iio/impedance-analyzer/ad5933.c | 2
-rw-r--r-- drivers/staging/media/atomisp/pci/atomisp_cmd.c | 58
-rw-r--r-- drivers/staging/media/atomisp/pci/atomisp_internal.h | 4
-rw-r--r-- drivers/staging/media/atomisp/pci/atomisp_ioctl.c | 52
-rw-r--r-- drivers/staging/media/atomisp/pci/atomisp_v4l2.c | 59
-rw-r--r-- drivers/target/target_core_configfs.c | 48
-rw-r--r-- drivers/target/target_core_pscsi.c | 9
-rw-r--r-- drivers/thermal/intel/intel_powerclamp.c | 32
-rw-r--r-- drivers/thunderbolt/tb_regs.h | 2
-rw-r--r-- drivers/thunderbolt/usb4.c | 2
-rw-r--r-- drivers/tty/hvc/Kconfig | 8
-rw-r--r-- drivers/tty/serial/8250/8250_pci1xxxx.c | 2
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 60
-rw-r--r-- drivers/tty/serial/max310x.c | 53
-rw-r--r-- drivers/tty/serial/mxs-auart.c | 5
-rw-r--r-- drivers/tty/serial/serial_core.c | 2
-rw-r--r-- drivers/tty/serial/stm32-usart.c | 4
-rw-r--r-- drivers/ufs/core/ufshcd.c | 7
-rw-r--r-- drivers/usb/cdns3/cdns3-gadget.c | 8
-rw-r--r-- drivers/usb/cdns3/core.c | 1
-rw-r--r-- drivers/usb/cdns3/drd.c | 13
-rw-r--r-- drivers/usb/cdns3/drd.h | 6
-rw-r--r-- drivers/usb/cdns3/host.c | 16
-rw-r--r-- drivers/usb/chipidea/ci.h | 2
-rw-r--r-- drivers/usb/chipidea/core.c | 44
-rw-r--r-- drivers/usb/common/ulpi.c | 2
-rw-r--r-- drivers/usb/core/hub.c | 46
-rw-r--r-- drivers/usb/dwc3/core.h | 1
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r-- drivers/usb/dwc3/gadget.c | 17
-rw-r--r-- drivers/usb/dwc3/gadget.h | 2
-rw-r--r-- drivers/usb/dwc3/host.c | 4
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 20
-rw-r--r-- drivers/usb/gadget/function/f_ncm.c | 18
-rw-r--r-- drivers/usb/gadget/udc/omap_udc.c | 3
-rw-r--r-- drivers/usb/gadget/udc/pch_udc.c | 1
-rw-r--r-- drivers/usb/host/uhci-grlib.c | 1
-rw-r--r-- drivers/usb/host/xhci-mem.c | 14
-rw-r--r-- drivers/usb/host/xhci-plat.c | 3
-rw-r--r-- drivers/usb/host/xhci-ring.c | 80
-rw-r--r-- drivers/usb/host/xhci.h | 1
-rw-r--r-- drivers/usb/roles/class.c | 29
-rw-r--r-- drivers/usb/serial/cp210x.c | 1
-rw-r--r-- drivers/usb/serial/option.c | 1
-rw-r--r-- drivers/usb/serial/qcserial.c | 2
-rw-r--r-- drivers/usb/storage/scsiglue.c | 7
-rw-r--r-- drivers/usb/storage/uas.c | 7
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 6
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.c | 2
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_acpi.c | 88
-rw-r--r-- drivers/video/fbdev/core/fbcon.c | 11
-rw-r--r-- drivers/video/fbdev/hyperv_fb.c | 2
-rw-r--r-- drivers/video/fbdev/savage/savagefb_driver.c | 3
-rw-r--r-- drivers/video/fbdev/sis/sis_main.c | 2
-rw-r--r-- drivers/video/fbdev/stifb.c | 2
-rw-r--r-- drivers/video/fbdev/vt8500lcdfb.c | 1
-rw-r--r-- drivers/xen/events/events_base.c | 8
-rw-r--r-- drivers/xen/gntalloc.c | 2
-rw-r--r-- drivers/xen/pcpu.c | 2
-rw-r--r-- drivers/xen/privcmd.c | 15
-rw-r--r-- drivers/xen/xen-balloon.c | 2
-rw-r--r-- drivers/xen/xenbus/xenbus_client.c | 15
867 files changed, 9553 insertions(+), 5384 deletions(-)
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index 19035230563d..7cb962e21453 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -102,7 +102,7 @@ static int reset_pending_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = seq_to_ivpu(s);
- seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+ seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
return 0;
}
@@ -130,7 +130,9 @@ dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size
fw->dvfs_mode = dvfs_mode;
- ivpu_pm_schedule_recovery(vdev);
+ ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret)
+ return ret;
return size;
}
@@ -190,7 +192,10 @@ fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
return ret;
ivpu_hw_profiling_freq_drive(vdev, enable);
- ivpu_pm_schedule_recovery(vdev);
+
+ ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret)
+ return ret;
return size;
}
@@ -301,11 +306,18 @@ static ssize_t
ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
{
struct ivpu_device *vdev = file->private_data;
+ int ret;
if (!size)
return -EINVAL;
- ivpu_pm_schedule_recovery(vdev);
+ ret = ivpu_rpm_get(vdev);
+ if (ret)
+ return ret;
+
+ ivpu_pm_trigger_recovery(vdev, "debugfs");
+ flush_work(&vdev->pm->recovery_work);
+ ivpu_rpm_put(vdev);
return size;
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 64927682161b..4b0640226986 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -6,6 +6,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_accel.h>
#include <drm/drm_file.h>
@@ -17,6 +18,7 @@
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@@ -65,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
return file_priv;
}
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
- struct ivpu_file_priv *file_priv;
-
- xa_lock_irq(&vdev->context_xa);
- file_priv = xa_load(&vdev->context_xa, id);
- /* file_priv may still be in context_xa during file_priv_release() */
- if (file_priv && !kref_get_unless_zero(&file_priv->ref))
- file_priv = NULL;
- xa_unlock_irq(&vdev->context_xa);
-
- if (file_priv)
- ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
- file_priv->ctx.id, kref_read(&file_priv->ref));
-
- return file_priv;
+ mutex_lock(&file_priv->lock);
+ if (file_priv->bound) {
+ ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
+
+ ivpu_cmdq_release_all_locked(file_priv);
+ ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+ ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
+ ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ file_priv->bound = false;
+ drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
+ }
+ mutex_unlock(&file_priv->lock);
}
static void file_priv_release(struct kref *ref)
@@ -88,13 +88,15 @@ static void file_priv_release(struct kref *ref)
struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
struct ivpu_device *vdev = file_priv->vdev;
- ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+ ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
+ file_priv->ctx.id, (bool)file_priv->bound);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+ mutex_lock(&vdev->context_list_lock);
+ file_priv_unbind(vdev, file_priv);
+ mutex_unlock(&vdev->context_list_lock);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
- ivpu_cmdq_release_all(file_priv);
- ivpu_jsm_context_release(vdev, file_priv->ctx.id);
- ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
- drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
mutex_destroy(&file_priv->lock);
kfree(file_priv);
}
@@ -176,9 +178,6 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user.start;
break;
- case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- args->value = file_priv->priority;
- break;
case DRM_IVPU_PARAM_CONTEXT_ID:
args->value = file_priv->ctx.id;
break;
@@ -218,17 +217,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct ivpu_file_priv *file_priv = file->driver_priv;
struct drm_ivpu_param *args = data;
int ret = 0;
switch (args->param) {
- case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
- file_priv->priority = args->value;
- else
- ret = -EINVAL;
- break;
default:
ret = -EINVAL;
}
@@ -241,50 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_file_priv *file_priv;
u32 ctx_id;
- void *old;
- int ret;
+ int idx, ret;
- ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
- if (ret) {
- ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
- return ret;
- }
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv) {
ret = -ENOMEM;
- goto err_xa_erase;
+ goto err_dev_exit;
}
file_priv->vdev = vdev;
- file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+ file_priv->bound = true;
kref_init(&file_priv->ref);
mutex_init(&file_priv->lock);
+ mutex_lock(&vdev->context_list_lock);
+
+ ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
+ vdev->context_xa_limit, GFP_KERNEL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+ goto err_unlock;
+ }
+
ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
if (ret)
- goto err_mutex_destroy;
+ goto err_xa_erase;
- old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
- if (xa_is_err(old)) {
- ret = xa_err(old);
- ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
- goto err_ctx_fini;
- }
+ mutex_unlock(&vdev->context_list_lock);
+ drm_dev_exit(idx);
+
+ file->driver_priv = file_priv;
ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
ctx_id, current->comm, task_pid_nr(current));
- file->driver_priv = file_priv;
return 0;
-err_ctx_fini:
- ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-err_mutex_destroy:
- mutex_destroy(&file_priv->lock);
- kfree(file_priv);
err_xa_erase:
xa_erase_irq(&vdev->context_xa, ctx_id);
+err_unlock:
+ mutex_unlock(&vdev->context_list_lock);
+ mutex_destroy(&file_priv->lock);
+ kfree(file_priv);
+err_dev_exit:
+ drm_dev_exit(idx);
return ret;
}
@@ -340,8 +335,6 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
if (!ret)
ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
- else
- ivpu_hw_diagnose_failure(vdev);
return ret;
}
@@ -369,6 +362,9 @@ int ivpu_boot(struct ivpu_device *vdev)
ret = ivpu_wait_for_ready(vdev);
if (ret) {
ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_mmu_evtq_dump(vdev);
+ ivpu_fw_log_dump(vdev);
return ret;
}
@@ -484,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
- /* VPU 37XX does not require 10m D3hot delay */
- if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
- pdev->d3hot_delay = 0;
+ /* NPU does not require 10m D3hot delay */
+ pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
if (ret) {
@@ -540,6 +535,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
+ ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret)
goto err_xa_destroy;
@@ -611,14 +610,30 @@ err_xa_destroy:
return ret;
}
+static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+{
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+
+ mutex_lock(&vdev->context_list_lock);
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+ file_priv_unbind(vdev, file_priv);
+
+ mutex_unlock(&vdev->context_list_lock);
+}
+
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_pm_disable(vdev);
ivpu_shutdown(vdev);
if (IVPU_WA(d3hot_after_power_off))
pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
+ ivpu_jobs_abort_all(vdev);
ivpu_job_done_consumer_fini(vdev);
ivpu_pm_cancel_recovery(vdev);
+ ivpu_bo_unbind_all_user_contexts(vdev);
ivpu_ipc_fini(vdev);
ivpu_fw_fini(vdev);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index ebc4b84f27b2..069ace4adb2d 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -56,6 +56,7 @@
#define IVPU_DBG_JSM BIT(10)
#define IVPU_DBG_KREF BIT(11)
#define IVPU_DBG_RPM BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
#define ivpu_err(vdev, fmt, ...) \
drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -114,6 +115,7 @@ struct ivpu_device {
struct ivpu_mmu_context gctx;
struct ivpu_mmu_context rctx;
+ struct mutex context_list_lock; /* Protects user context addition/removal */
struct xarray context_xa;
struct xa_limit context_xa_limit;
@@ -145,8 +147,8 @@ struct ivpu_file_priv {
struct mutex lock; /* Protects cmdq */
struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
struct ivpu_mmu_context ctx;
- u32 priority;
bool has_mmu_faults;
+ bool bound;
};
extern int ivpu_dbg_mask;
@@ -162,7 +164,6 @@ extern bool ivpu_disable_mmu_cont_pages;
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 6576232f3e67..5fa8bd4603d5 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
- (ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
(ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
vdev->wa.disable_d0i3_msg = true;
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 1dda4f38ea25..e9ddbe9f50eb 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -24,14 +24,11 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
- if (bo->ctx)
- ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n",
- action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
- bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
- else
- ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n",
- action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
- bo->handle);
+ ivpu_dbg(vdev, BO,
+ "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+ (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+ (bool)bo->base.base.import_attach);
}
/*
@@ -49,12 +46,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
mutex_lock(&bo->lock);
ivpu_dbg_bo(vdev, bo, "pin");
-
- if (!bo->ctx) {
- ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle);
- ret = -EINVAL;
- goto unlock;
- }
+ drm_WARN_ON(&vdev->drm, !bo->ctx);
if (!bo->mmu_mapped) {
struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
@@ -85,7 +77,10 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
const struct ivpu_addr_range *range)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- int ret;
+ int idx, ret;
+
+ if (!drm_dev_enter(&vdev->drm, &idx))
+ return -ENODEV;
mutex_lock(&bo->lock);
@@ -101,6 +96,8 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
mutex_unlock(&bo->lock);
+ drm_dev_exit(idx);
+
return ret;
}
@@ -108,11 +105,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert_held(&bo->lock);
-
- ivpu_dbg_bo(vdev, bo, "unbind");
-
- /* TODO: dma_unmap */
+ lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -124,19 +117,23 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
if (bo->ctx) {
ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
- bo->vpu_addr = 0;
bo->ctx = NULL;
}
-}
-static void ivpu_bo_unbind(struct ivpu_bo *bo)
-{
- mutex_lock(&bo->lock);
- ivpu_bo_unbind_locked(bo);
- mutex_unlock(&bo->lock);
+ if (bo->base.base.import_attach)
+ return;
+
+ dma_resv_lock(bo->base.base.resv, NULL);
+ if (bo->base.sgt) {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+ }
+ dma_resv_unlock(bo->base.base.resv);
}
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
struct ivpu_bo *bo;
@@ -146,8 +143,10 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
mutex_lock(&bo->lock);
- if (bo->ctx == ctx)
+ if (bo->ctx == ctx) {
+ ivpu_dbg_bo(vdev, bo, "unbind");
ivpu_bo_unbind_locked(bo);
+ }
mutex_unlock(&bo->lock);
}
mutex_unlock(&vdev->bo_list_lock);
@@ -199,9 +198,6 @@ ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
- ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n",
- bo->vpu_addr, bo->base.base.size, flags);
-
return bo;
}
@@ -212,6 +208,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
struct ivpu_bo *bo = to_ivpu_bo(obj);
struct ivpu_addr_range *range;
+ if (bo->ctx) {
+ ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+ file_priv->ctx.id, bo->ctx->id);
+ return -EALREADY;
+ }
+
if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
range = &vdev->hw->ranges.shave;
else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -227,62 +229,24 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
struct ivpu_device *vdev = to_ivpu_device(obj->dev);
struct ivpu_bo *bo = to_ivpu_bo(obj);
+ ivpu_dbg_bo(vdev, bo, "free");
+
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
mutex_unlock(&vdev->bo_list_lock);
drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
- ivpu_dbg_bo(vdev, bo, "free");
-
- ivpu_bo_unbind(bo);
+ ivpu_bo_unbind_locked(bo);
mutex_destroy(&bo->lock);
drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
drm_gem_shmem_free(&bo->base);
}
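
The relaxed lockdep_assert() in ivpu_bo_unbind_locked() above admits exactly the two call sites visible in this patch, sketched side by side for clarity:

	/* Context teardown: other references may still exist, so hold the lock. */
	mutex_lock(&bo->lock);
	ivpu_bo_unbind_locked(bo);	/* lockdep_is_held(&bo->lock) branch */
	mutex_unlock(&bo->lock);

	/* Final free: the kref already hit zero, so no concurrent user can
	 * reach the BO and taking bo->lock would be pointless.
	 */
	ivpu_bo_unbind_locked(bo);	/* !kref_read(...) branch */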
-static const struct dma_buf_ops ivpu_bo_dmabuf_ops = {
- .cache_sgt_mapping = true,
- .attach = drm_gem_map_attach,
- .detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
- .release = drm_gem_dmabuf_release,
- .mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
-};
-
-static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
-{
- struct drm_device *dev = obj->dev;
- struct dma_buf_export_info exp_info = {
- .exp_name = KBUILD_MODNAME,
- .owner = dev->driver->fops->owner,
- .ops = &ivpu_bo_dmabuf_ops,
- .size = obj->size,
- .flags = flags,
- .priv = obj,
- .resv = obj->resv,
- };
- void *sgt;
-
- /*
- * Make sure that pages are allocated and dma-mapped before exporting the bo.
- * DMA-mapping is required if the bo will be imported to the same device.
- */
- sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
- if (IS_ERR(sgt))
- return sgt;
-
- return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_bo_free,
.open = ivpu_bo_open,
- .export = ivpu_bo_export,
.print_info = drm_gem_shmem_object_print_info,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
@@ -315,11 +279,9 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
return PTR_ERR(bo);
}
- ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle);
- if (!ret) {
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (!ret)
args->vpu_addr = bo->vpu_addr;
- args->handle = bo->handle;
- }
drm_gem_object_put(&bo->base.base);
@@ -361,7 +323,9 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
if (ret)
goto err_put;
+ dma_resv_lock(bo->base.base.resv, NULL);
ret = drm_gem_shmem_vmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
if (ret)
goto err_put;
@@ -376,7 +340,10 @@ void ivpu_bo_free_internal(struct ivpu_bo *bo)
{
struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
+ dma_resv_lock(bo->base.base.resv, NULL);
drm_gem_shmem_vunmap(&bo->base, &map);
+ dma_resv_unlock(bo->base.base.resv);
+
drm_gem_object_put(&bo->base.base);
}
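
Both internal-BO helpers now bracket the shmem vmap/vunmap calls with the GEM object's reservation lock, which recent drm_gem_shmem helpers expect the caller to hold. The pattern in isolation (NULL means no ww_acquire_ctx, i.e. a plain, uncontended acquisition):

	struct iosys_map map;
	int ret;

	dma_resv_lock(bo->base.base.resv, NULL);
	ret = drm_gem_shmem_vmap(&bo->base, &map);
	dma_resv_unlock(bo->base.base.resv);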
@@ -432,19 +399,11 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
{
- unsigned long dma_refcount = 0;
-
mutex_lock(&bo->lock);
- if (bo->base.base.dma_buf && bo->base.base.dma_buf->file)
- dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count);
-
- drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu",
- bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size,
- bo->flags, kref_read(&bo->base.base.refcount), dma_refcount);
-
- if (bo->base.base.import_attach)
- drm_printf(p, " imported");
+ drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+ bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+ bo->flags, kref_read(&bo->base.base.refcount));
if (bo->base.pages)
drm_printf(p, " has_pages");
@@ -452,6 +411,9 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
if (bo->mmu_mapped)
drm_printf(p, " mmu_mapped");
+ if (bo->base.base.import_attach)
+ drm_printf(p, " imported");
+
drm_printf(p, "\n");
mutex_unlock(&bo->lock);
@@ -462,8 +424,8 @@ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
struct ivpu_device *vdev = to_ivpu_device(dev);
struct ivpu_bo *bo;
- drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n",
- "ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs");
+ drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
+ "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index d75cad0d3c74..a8559211c70d 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -19,14 +19,13 @@ struct ivpu_bo {
struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
u64 vpu_addr;
- u32 handle;
u32 flags;
u32 job_status; /* Valid only for command buffer */
bool mmu_mapped;
};
int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 574cdeefb66b..89af1006df55 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -510,22 +510,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
-{
- ivpu_boot_dpu_active_drive(vdev, false);
- ivpu_boot_pwr_island_isolation_drive(vdev, true);
- ivpu_boot_pwr_island_trickle_drive(vdev, false);
- ivpu_boot_pwr_island_drive(vdev, false);
-
- return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
-}
-
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -616,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
return 0;
}
+static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret = 0;
- if (ivpu_boot_pwr_domain_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable power domain\n");
+ if (ivpu_hw_37xx_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO;
}
@@ -661,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
+ /* PLL requests may fail when powering down, so issue WP 0 here */
+ ret = ivpu_pll_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);
+
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@@ -875,24 +895,18 @@ static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
-
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
-
ivpu_hw_wdt_disable(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
-
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
/* Handler for IRQs from VPU core (irqV) */
@@ -970,7 +984,7 @@ static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
return true;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index eba2fdef2ace..a1523d0b1ef3 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -24,7 +24,7 @@
#define SKU_HW_ID_SHIFT 16u
#define SKU_HW_ID_MASK 0xffff0000u
-#define PLL_CONFIG_DEFAULT 0x1
+#define PLL_CONFIG_DEFAULT 0x0
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ (50 * 1000000)
@@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 tile_disable;
- u32 tile_enable;
u32 fuse;
fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
@@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
- tile_enable = (~tile_disable) & TILE_MAX_MASK;
-
- hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
- hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
hw->tile_fuse = tile_disable;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
@@ -746,7 +741,7 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
return 0;
}
-static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
@@ -768,6 +763,23 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
return ret;
}
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ if (ivpu_hw_40xx_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset VPU IP\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
@@ -913,7 +925,7 @@ static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
- if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+ if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
ivpu_warn(vdev, "Failed to reset the VPU\n");
if (ivpu_pll_disable(vdev)) {
@@ -1032,18 +1044,18 @@ static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
/* TODO: For LNN hang consider engine reset instead of full recovery */
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_hw_wdt_disable(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
}
/* Handler for IRQs from VPU core (irqV) */
@@ -1137,7 +1149,7 @@ static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
return true;
}
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index e86621f16f85..fa66c39b57ec 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -343,10 +343,8 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
vdev->timeout.jsm);
- if (hb_ret == -ETIMEDOUT) {
- ivpu_hw_diagnose_failure(vdev);
- ivpu_pm_schedule_recovery(vdev);
- }
+ if (hb_ret == -ETIMEDOUT)
+ ivpu_pm_trigger_recovery(vdev, "IPC timeout");
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 7206cf9cdb4a..e70cfb859339 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -112,22 +112,20 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
}
}
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
int i;
- mutex_lock(&file_priv->lock);
+ lockdep_assert_held(&file_priv->lock);
for (i = 0; i < IVPU_NUM_ENGINES; i++)
ivpu_cmdq_release_locked(file_priv, i);
-
- mutex_unlock(&file_priv->lock);
}
/*
* Mark the doorbell as unregistered and reset job queue pointers.
* This function needs to be called when the VPU hardware is restarted
- * and FW looses job queue state. The next time job queue is used it
+ * and FW loses job queue state. The next time job queue is used it
* will be registered again.
*/
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
@@ -161,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
struct ivpu_file_priv *file_priv;
unsigned long ctx_id;
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
- if (!file_priv)
- continue;
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv)
ivpu_cmdq_reset_all(file_priv);
- ivpu_file_priv_put(&file_priv);
- }
+ mutex_unlock(&vdev->context_list_lock);
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -243,60 +239,32 @@ static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
return &fence->base;
}
-static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+static void ivpu_job_destroy(struct ivpu_job *job)
{
struct ivpu_device *vdev = job->vdev;
-
- kref_get(&job->ref);
- *link = job;
-
- ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-}
-
-static void job_release(struct kref *ref)
-{
- struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
- struct ivpu_device *vdev = job->vdev;
u32 i;
+ ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
+ job->job_id, job->file_priv->ctx.id, job->engine_idx);
+
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
drm_gem_object_put(&job->bos[i]->base.base);
dma_fence_put(job->done_fence);
ivpu_file_priv_put(&job->file_priv);
-
- ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
kfree(job);
-
- /* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
- ivpu_rpm_put(vdev);
-}
-
-static void job_put(struct ivpu_job *job)
-{
- struct ivpu_device *vdev = job->vdev;
-
- ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
- kref_put(&job->ref, job_release);
}
static struct ivpu_job *
-ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
- int ret;
-
- ret = ivpu_rpm_get(vdev);
- if (ret < 0)
- return NULL;
job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
if (!job)
- goto err_rpm_put;
-
- kref_init(&job->ref);
+ return NULL;
job->vdev = vdev;
job->engine_idx = engine_idx;
@@ -310,17 +278,14 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->file_priv = ivpu_file_priv_get(file_priv);
ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
-
return job;
err_free_job:
kfree(job);
-err_rpm_put:
- ivpu_rpm_put(vdev);
return NULL;
}
-static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
struct ivpu_job *job;
@@ -329,7 +294,7 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
return -ENOENT;
if (job->file_priv->has_mmu_faults)
- job_status = VPU_JSM_STATUS_ABORTED;
+ job_status = DRM_IVPU_JOB_STATUS_ABORTED;
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
@@ -337,9 +302,10 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+ ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
- job_put(job);
+ ivpu_rpm_put(vdev);
return 0;
}
@@ -349,10 +315,10 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
unsigned long id;
xa_for_each(&vdev->submitted_jobs_xa, id, job)
- ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}
-static int ivpu_direct_job_submission(struct ivpu_job *job)
+static int ivpu_job_submit(struct ivpu_job *job)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = job->vdev;
@@ -360,53 +326,65 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
struct ivpu_cmdq *cmdq;
int ret;
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
if (!cmdq) {
- ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
- file_priv->ctx.id, job->engine_idx);
+ ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
+ file_priv->ctx.id, job->engine_idx);
ret = -EINVAL;
- goto err_unlock;
+ goto err_unlock_file_priv;
}
job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
- job_get(job, &job);
- ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+ xa_lock(&vdev->submitted_jobs_xa);
+ ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
if (ret) {
- ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
- goto err_job_put;
+ ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+ file_priv->ctx.id);
+ ret = -EBUSY;
+ goto err_unlock_submitted_jobs_xa;
}
ret = ivpu_cmdq_push_job(cmdq, job);
if (ret)
- goto err_xa_erase;
+ goto err_erase_xa;
ivpu_start_job_timeout_detection(vdev);
- ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
- job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
- job->engine_idx, cmdq->jobq->header.tail);
-
- if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) {
- ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
cmdq->jobq->header.head = cmdq->jobq->header.tail;
wmb(); /* Flush WC buffer for jobq header */
} else {
ivpu_cmdq_ring_db(vdev, cmdq);
}
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
+ job->job_id, file_priv->ctx.id, job->engine_idx,
+ job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+
+ xa_unlock(&vdev->submitted_jobs_xa);
+
mutex_unlock(&file_priv->lock);
+
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+
return 0;
-err_xa_erase:
- xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_job_put:
- job_put(job);
-err_unlock:
+err_erase_xa:
+ __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs_xa:
+ xa_unlock(&vdev->submitted_jobs_xa);
+err_unlock_file_priv:
mutex_unlock(&file_priv->lock);
+ ivpu_rpm_put(vdev);
return ret;
}
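
The job ID doubles as a routing key: an upper field carries the context ID and a lower field the per-context job number, so a completion message can be matched back to its context with no extra lookup state. A worked example of the range computation, assuming a mask layout of GENMASK(31, 8) for the context and GENMASK(7, 0) for the job number (the real defines are not visible in this hunk):

	/* Assumed layout -- the actual masks live elsewhere in ivpu_job.c. */
	#define JOB_ID_JOB_MASK		GENMASK(7, 0)
	#define JOB_ID_CONTEXT_MASK	GENMASK(31, 8)

	/* For ctx.id == 5: min = 4 << 8 = 0x400 and max = 0x4ff, i.e. at
	 * most 256 jobs in flight per context; once the range is exhausted
	 * __xa_alloc() fails and the code above reports -EBUSY.
	 */
	struct xa_limit range = {
		.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, 5 - 1),
		.max = FIELD_PREP(JOB_ID_CONTEXT_MASK, 5 - 1) | JOB_ID_JOB_MASK,
	};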
@@ -488,6 +466,9 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (params->engine > DRM_IVPU_ENGINE_COPY)
return -EINVAL;
+ if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ return -EINVAL;
+
if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
return -EINVAL;
@@ -509,44 +490,49 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
params->buffer_count * sizeof(u32));
if (ret) {
ret = -EFAULT;
- goto free_handles;
+ goto err_free_handles;
}
if (!drm_dev_enter(&vdev->drm, &idx)) {
ret = -ENODEV;
- goto free_handles;
+ goto err_free_handles;
}
ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
file_priv->ctx.id, params->buffer_count);
- job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+ job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
if (!job) {
ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
- goto dev_exit;
+ goto err_exit_dev;
}
ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
params->commands_offset);
if (ret) {
- ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
- goto job_put;
+ ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+ goto err_destroy_job;
}
- ret = ivpu_direct_job_submission(job);
- if (ret) {
- dma_fence_signal(job->done_fence);
- ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
- }
+ down_read(&vdev->pm->reset_lock);
+ ret = ivpu_job_submit(job);
+ up_read(&vdev->pm->reset_lock);
+ if (ret)
+ goto err_signal_fence;
-job_put:
- job_put(job);
-dev_exit:
drm_dev_exit(idx);
-free_handles:
kfree(buf_handles);
+ return ret;
+err_signal_fence:
+ dma_fence_signal(job->done_fence);
+err_destroy_job:
+ ivpu_job_destroy(job);
+err_exit_dev:
+ drm_dev_exit(idx);
+err_free_handles:
+ kfree(buf_handles);
return ret;
}
@@ -568,7 +554,7 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
}
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
- ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+ ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
ivpu_start_job_timeout_detection(vdev);
}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 45a2f2ec82e5..ca4984071cc7 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -43,7 +43,6 @@ struct ivpu_cmdq {
will update the job status
*/
struct ivpu_job {
- struct kref ref;
struct ivpu_device *vdev;
struct ivpu_file_priv *file_priv;
struct dma_fence *done_fence;
@@ -56,7 +55,7 @@ struct ivpu_job {
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 2228c44b115f..91bd640655ab 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -7,6 +7,7 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
+#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
@@ -71,10 +72,10 @@
#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
-#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
-#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
-#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
+#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
+#define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT)
#define IVPU_MMU_CMDQ_CMD_SIZE 16
#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
@@ -474,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
return 0;
}
+static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
+}
+
+static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
+}
+
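
The new helpers implement the classic SMMU-style wrap-bit scheme: prod and cons are five-bit counters whose low four bits index the sixteen-entry queue and whose bit 4 flips on every wrap-around, keeping "full" distinguishable from "empty" even though both compare equal on the index alone. A standalone demo of the arithmetic, using the same IVPU_MMU_Q_COUNT_LOG2 == 4 as above:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define Q_COUNT		16u			/* 1 << 4 */
	#define Q_IDX(v)	((v) & (Q_COUNT - 1))	/* bits 3:0 */
	#define Q_WRP(v)	((v) & Q_COUNT)		/* bit 4: wrap flag */

	static bool queue_is_full(uint32_t prod, uint32_t cons)
	{
		return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons);
	}

	int main(void)
	{
		assert(queue_is_full(0x10, 0x00));	/* wrapped once: 16 unread */
		assert(!queue_is_full(0x15, 0x15));	/* identical: empty */
		return 0;
	}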
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
- struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
- u64 *queue_buffer = q->base;
- int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
+ struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+ u64 *queue_buffer = cmdq->base;
+ int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
- if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
+ if (ivpu_mmu_queue_is_full(cmdq)) {
ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
return -EBUSY;
}
queue_buffer[idx] = data0;
queue_buffer[idx + 1] = data1;
- q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
+ cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
@@ -518,6 +531,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
ivpu_mmu_cmdq_err_to_str(err));
+ ivpu_hw_diagnose_failure(vdev);
}
return ret;
@@ -558,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->cmdq.cons = 0;
memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
- clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
@@ -872,20 +885,15 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
- if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ if (ivpu_mmu_queue_is_empty(evtq))
return NULL;
- clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
-
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
-
return evt;
}
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
- bool schedule_recovery = false;
u32 *event;
u32 ssid;
@@ -895,14 +903,22 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
ivpu_mmu_dump_event(vdev, event);
ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
- schedule_recovery = true;
- else
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ ivpu_pm_trigger_recovery(vdev, "MMU event");
+ return;
+ }
+
+ ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
}
+}
- if (schedule_recovery)
- ivpu_pm_schedule_recovery(vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+{
+ u32 *event;
+
+ while ((event = ivpu_mmu_get_event(vdev)) != NULL)
+ ivpu_mmu_dump_event(vdev, event);
}
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index cb551126806b..6fa35c240710 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -46,5 +46,6 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 12a8c09d4547..fe6161299236 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -355,6 +355,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
+ ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+ ctx->id, dma_addr, vpu_addr, size);
+
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
@@ -366,6 +369,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
+
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -388,14 +392,19 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
mutex_lock(&ctx->lock);
for_each_sgtable_dma_sg(sgt, sg, i) {
+ dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
size_t size = sg_dma_len(sg) + sg->offset;
+ ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+ ctx->id, dma_addr, vpu_addr, size);
+
ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
vpu_addr += size;
}
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
+
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 0af8864cb3b5..5f73854234ba 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -13,6 +13,7 @@
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
@@ -57,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
+
ret = ivpu_shutdown(vdev);
- if (ret) {
+ if (ret)
ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
- return ret;
- }
+
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
return ret;
}
@@ -70,6 +74,9 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
+
retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
@@ -111,22 +118,37 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret;
-retry:
- ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
- if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
- cond_resched();
- goto retry;
- }
+ ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
- if (ret && ret != -EAGAIN)
- ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
+ ret = pm_runtime_resume_and_get(vdev->drm.dev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
+
+ ivpu_fw_log_dump(vdev);
+
+ atomic_inc(&vdev->pm->reset_counter);
+ atomic_set(&vdev->pm->reset_pending, 1);
+ down_write(&vdev->pm->reset_lock);
+
+ ivpu_suspend(vdev);
+ ivpu_pm_prepare_cold_boot(vdev);
+ ivpu_jobs_abort_all(vdev);
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+
+ up_write(&vdev->pm->reset_lock);
+ atomic_set(&vdev->pm->reset_pending, 0);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
}
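
Recovery now runs the whole suspend/cold-boot-prep/resume cycle inline under the write side of pm->reset_lock, while submission (the down_read() added to ivpu_submit_ioctl earlier in this patch) holds the read side, so a reset and a job submission can never interleave. The two sides, condensed:

	/* Writer: recovery worker and the PCI reset callbacks. */
	down_write(&vdev->pm->reset_lock);
	/* ... ivpu_suspend(), cold-boot prep, abort jobs, ivpu_resume() ... */
	up_write(&vdev->pm->reset_lock);

	/* Readers: every submission path. */
	down_read(&vdev->pm->reset_lock);
	ret = ivpu_job_submit(job);
	up_read(&vdev->pm->reset_lock);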
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
{
- struct ivpu_pm_info *pm = vdev->pm;
+ ivpu_err(vdev, "Recovery triggered by %s\n", reason);
if (ivpu_disable_recovery) {
ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
@@ -138,10 +160,11 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
return;
}
- /* Schedule recovery if it's not in progress */
- if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
- ivpu_hw_irq_disable(vdev);
- queue_work(system_long_wq, &pm->recovery_work);
+ /* Trigger recovery if it's not in progress */
+ if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
+ queue_work(system_long_wq, &vdev->pm->recovery_work);
}
}
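
The atomic_cmpxchg() on reset_pending acts as a one-shot latch: when several error sources fire back to back (WDT, MMU event, TDR, buttress IRQ), only the first queues the worker, and the flag is cleared again once ivpu_pm_recovery_work() completes. The idiom in isolation:

	/* Only the 0 -> 1 transition wins; later callers see 1 and bail out. */
	if (atomic_cmpxchg(&pm->reset_pending, 0, 1) == 0)
		queue_work(system_long_wq, &pm->recovery_work);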
@@ -149,12 +172,8 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
- unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
-
- ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms);
- ivpu_hw_diagnose_failure(vdev);
- ivpu_pm_schedule_recovery(vdev);
+ ivpu_pm_trigger_recovery(vdev, "TDR");
}
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
@@ -192,9 +211,6 @@ int ivpu_pm_suspend_cb(struct device *dev)
ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev);
- pci_save_state(to_pci_dev(dev));
- pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
-
ivpu_dbg(vdev, PM, "Suspend done.\n");
return 0;
@@ -208,9 +224,6 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_dbg(vdev, PM, "Resume..\n");
- pci_set_power_state(to_pci_dev(dev), PCI_D0);
- pci_restore_state(to_pci_dev(dev));
-
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
@@ -227,6 +240,9 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
bool hw_is_idle = true;
int ret;
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+ drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
@@ -247,7 +263,8 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
if (!hw_is_idle) {
- ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_fw_log_dump(vdev);
ivpu_pm_prepare_cold_boot(vdev);
} else {
ivpu_pm_prepare_warm_boot(vdev);
@@ -308,11 +325,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- pm_runtime_get_sync(vdev->drm.dev);
-
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->in_reset, 1);
+ atomic_set(&vdev->pm->reset_pending, 1);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+ down_write(&vdev->pm->reset_lock);
ivpu_prepare_for_reset(vdev);
ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev);
@@ -329,9 +347,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- atomic_set(&vdev->pm->in_reset, 0);
+ up_write(&vdev->pm->reset_lock);
+ atomic_set(&vdev->pm->reset_pending, 0);
ivpu_dbg(vdev, PM, "Post-reset done.\n");
+ pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
@@ -344,7 +364,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm->vdev = vdev;
pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
- atomic_set(&pm->in_reset, 0);
+ init_rwsem(&pm->reset_lock);
+ atomic_set(&pm->reset_pending, 0);
+ atomic_set(&pm->reset_counter, 0);
+
INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index 97c6e0b0aa42..ec60fbeefefc 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -6,6 +6,7 @@
#ifndef __IVPU_PM_H__
#define __IVPU_PM_H__
+#include <linux/rwsem.h>
#include <linux/types.h>
struct ivpu_device;
@@ -14,8 +15,9 @@ struct ivpu_pm_info {
struct ivpu_device *vdev;
struct delayed_work job_timeout_work;
struct work_struct recovery_work;
- atomic_t in_reset;
+ struct rw_semaphore reset_lock;
atomic_t reset_counter;
+ atomic_t reset_pending;
bool is_warmboot;
u32 suspend_reschedule_counter;
};
@@ -37,7 +39,7 @@ int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev);
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 7b7c605166e0..ab2a82cb1b0b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -674,78 +673,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
-/*
- * Only a single callback can be registered for CXL CPER events.
- */
-static DECLARE_RWSEM(cxl_cper_rw_sem);
-static cxl_cper_callback cper_callback;
-
-/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
-
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-#define CPER_SEC_CXL_GEN_MEDIA_GUID \
- GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
- 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
-
-/*
- * DRAM Event Record
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
- */
-#define CPER_SEC_CXL_DRAM_GUID \
- GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
- 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-#define CPER_SEC_CXL_MEM_MODULE_GUID \
- GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
- 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
-
-static void cxl_cper_post_event(enum cxl_event_type event_type,
- struct cxl_cper_event_rec *rec)
-{
- if (rec->hdr.length <= sizeof(rec->hdr) ||
- rec->hdr.length > sizeof(*rec)) {
- pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
- rec->hdr.length);
- return;
- }
-
- if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
- pr_err(FW_WARN "CXL CPER invalid event\n");
- return;
- }
-
- guard(rwsem_read)(&cxl_cper_rw_sem);
- if (cper_callback)
- cper_callback(event_type, rec);
-}
-
-int cxl_cper_register_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (cper_callback)
- return -EINVAL;
- cper_callback = callback;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
-
-int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (callback != cper_callback)
- return -EINVAL;
- cper_callback = NULL;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
-
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -780,22 +707,6 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
- } else if (guid_equal(sec_type,
- &CPER_SEC_CXL_MEM_MODULE_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index dbdee2924594..02255795b800 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,10 +525,12 @@ static void acpi_ec_clear(struct acpi_ec *ec)
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/* Drain additional events if hardware requires that */
if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -544,9 +546,11 @@ static void __acpi_ec_flush_work(void)
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/*
* When ec_freeze_events is true, we need to flush events in
@@ -567,9 +571,10 @@ void acpi_ec_flush_work(void)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
+ unsigned long flags;
bool guarded;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
@@ -585,29 +590,31 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state != EC_EVENT_READY &&
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
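
All of the conversions in this file follow one rule: ec->lock is also taken from the EC GPE handler, i.e. in interrupt context, so every thread-side acquisition has to disable local interrupts or a handler arriving on the same CPU could deadlock against the held lock. The save/restore form is used where the caller's IRQ state is unknown; the plain _irq variants appear below in paths known to run with interrupts enabled. The general shape:

	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);	/* safe from any context */
	/* ... inspect or advance ec->curr ... */
	spin_unlock_irqrestore(&ec->lock, flags);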
@@ -749,6 +756,7 @@ static int ec_guard(struct acpi_ec *ec)
static int ec_poll(struct acpi_ec *ec)
{
+ unsigned long flags;
int repeat = 5; /* number of command restarts */
while (repeat--) {
@@ -757,14 +765,14 @@ static int ec_poll(struct acpi_ec *ec)
do {
if (!ec_guard(ec))
return 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec, false);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
@@ -772,10 +780,11 @@ static int ec_poll(struct acpi_ec *ec)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
struct transaction *t)
{
+ unsigned long tmp;
int ret = 0;
/* start transaction */
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
@@ -786,11 +795,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = t;
ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -799,7 +808,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease command");
unlock:
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -927,7 +936,9 @@ EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
@@ -937,28 +948,31 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
}
ec_log_drv("EC started");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static bool acpi_ec_stopped(struct acpi_ec *ec)
{
+ unsigned long flags;
bool flushed;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
flushed = acpi_ec_flushed(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec)) {
ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
wait_event(ec->wait, acpi_ec_stopped(ec));
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
acpi_ec_complete_request(ec);
@@ -969,25 +983,29 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
ec_log_drv("EC stopped");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = true;
ec->polling_guard = 0;
ec_log_drv("interrupt blocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = ec_busy_polling;
ec->polling_guard = ec_polling_guard;
ec_log_drv("interrupt unblocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
@@ -1119,9 +1137,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_put_query_handler(handler);
kfree(q);
@@ -1184,12 +1202,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress++;
queue_work(ec_query_wq, &q->work);
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
return 0;
@@ -1205,14 +1223,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event started");
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
while (ec->events_to_process) {
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_submit_query(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->events_to_process--;
}
@@ -1229,11 +1247,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event stopped");
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
guard_timeout = !!ec_guard(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
if (guard_timeout && !ec->curr)
@@ -1246,7 +1264,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec->events_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
}
static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1271,11 +1289,13 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
clear_gpe_and_advance_transaction(ec, true);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -2085,7 +2105,7 @@ bool acpi_ec_dispatch_gpe(void)
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
if (acpi_ec_gpe_status_set(first_ec)) {
pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2094,7 +2114,7 @@ bool acpi_ec_dispatch_gpe(void)
work_in_progress = acpi_ec_work_in_progress(first_ec);
}
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
if (!work_in_progress)
return false;
@@ -2107,11 +2127,11 @@ bool acpi_ec_dispatch_gpe(void)
pm_pr_dbg("ACPI EC work flushed\n");
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
work_in_progress = acpi_ec_work_in_progress(first_ec);
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
return false;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8dd23b19e997..eca24f41556d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -478,6 +478,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
{
WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
+
+ /* (e)poll-based threads require an explicit wakeup signal when
+ * queuing their own work; they rely on these events to consume
+ * messages without blocking on I/O. Without the wakeup, such a
+ * thread risks waiting indefinitely without ever handling the
+ * work it queued for itself.
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ thread->pid == current->pid && !thread->process_todo)
+ wake_up_interruptible_sync(&thread->wait);
+
thread->process_todo = true;
}
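
A sketch of the failure mode the new wakeup closes, assuming a looper thread that registered with BINDER_LOOPER_STATE_POLL and sleeps in epoll_wait() instead of blocking in a binder read:

	/* 1. Thread T sleeps in epoll_wait(), parked on thread->wait.
	 * 2. T wakes, and while processing it enqueues work onto its *own*
	 *    thread->todo (the thread->pid == current->pid case above).
	 * 3. No other thread targets T, so without the explicit
	 *    wake_up_interruptible_sync() no poll event is ever delivered
	 *    and the queued work sits unhandled indefinitely.
	 */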
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 3a5f3255f51b..682ff550ccfb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -48,6 +48,7 @@ enum {
enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
+ board_ahci_43bit_dma,
board_ahci_ign_iferr,
board_ahci_low_power,
board_ahci_no_debounce_delay,
@@ -128,6 +129,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_43bit_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_ign_iferr] = {
AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
.flags = AHCI_FLAG_COMMON,
@@ -597,14 +605,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
- /* Asmedia */
- { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
- { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
- { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
- { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
- { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
- { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
+ /* ASMedia */
+ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci_43bit_dma }, /* ASM1060 */
+ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci_43bit_dma }, /* ASM1060 */
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
+ { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci_43bit_dma }, /* ASM1061R */
+ { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci_43bit_dma }, /* ASM1062R */
+ { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci_43bit_dma }, /* ASM1062+JMB575 */
{ PCI_VDEVICE(ASMEDIA, 0x1062), board_ahci }, /* ASM1062A */
{ PCI_VDEVICE(ASMEDIA, 0x1064), board_ahci }, /* ASM1064 */
{ PCI_VDEVICE(ASMEDIA, 0x1164), board_ahci }, /* ASM1164 */
@@ -663,6 +671,19 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
+ switch (pdev->device) {
+ case 0x1166:
+ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+ hpriv->saved_port_map = 0x3f;
+ break;
+ case 0x1064:
+ dev_info(&pdev->dev, "ASM1064 has only four ports\n");
+ hpriv->saved_port_map = 0xf;
+ break;
+ }
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
@@ -949,11 +970,20 @@ static int ahci_pci_device_resume(struct device *dev)
#endif /* CONFIG_PM */
-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+static int ahci_configure_dma_masks(struct pci_dev *pdev,
+ struct ahci_host_priv *hpriv)
{
- const int dma_bits = using_dac ? 64 : 32;
+ int dma_bits;
int rc;
+ if (hpriv->cap & HOST_CAP_64) {
+ dma_bits = 64;
+ if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
+ dma_bits = 43;
+ } else {
+ dma_bits = 32;
+ }
+
/*
* If the device fixup already set the dma_mask to some non-standard
* value, don't extend it here. This happens on STA2X11, for example.
@@ -1926,7 +1956,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_gtf_filter_workaround(host);
/* initialize adapter */
- rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+ rc = ahci_configure_dma_masks(pdev, hpriv);
if (rc)
return rc;
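
Further down ahci_configure_dma_masks() (outside this hunk), the selected width is presumably applied with the usual helper; a hedged sketch:

	/* 64, 43 (the affected ASMedia parts) or 32 bits, as chosen above. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits));
	if (rc)
		dev_err(&pdev->dev, "DMA enable failed\n");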
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 4bae95b06ae3..df8f8a1a3a34 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -247,6 +247,7 @@ enum {
AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
suspend/resume */
AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
+ AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 64f7f7d6ba84..11a2c199a7c2 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
u32 axicc;
bool is_cci_enabled;
int flags;
- struct reset_control *rst;
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
@@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
+static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+ int rc, i;
+
+ rc = ahci_platform_enable_regulators(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_regulator;
+
+ /* Assert the controller reset */
+ rc = ahci_platform_assert_rsts(hpriv);
+ if (rc)
+ goto disable_clks;
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_rsts;
+ }
+
+ /* De-assert the controller reset */
+ ahci_platform_deassert_rsts(hpriv);
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_power_on(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ return 0;
+
+disable_rsts:
+ ahci_platform_deassert_rsts(hpriv);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+disable_clks:
+ ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+ ahci_platform_disable_regulators(hpriv);
+
+ return rc;
+}
+
static int ceva_ahci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
return -ENOMEM;
cevapriv->ahci_pdev = pdev;
-
- cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
- NULL);
- if (IS_ERR(cevapriv->rst))
- dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
- "failed to get reset\n");
-
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
- if (!cevapriv->rst) {
- rc = ahci_platform_enable_resources(hpriv);
- if (rc)
- return rc;
- } else {
- int i;
+ hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ NULL);
+ if (IS_ERR(hpriv->rsts))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
+ "failed to get reset\n");
- rc = ahci_platform_enable_clks(hpriv);
- if (rc)
- return rc;
- /* Assert the controller reset */
- reset_control_assert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_init(hpriv->phys[i]);
- if (rc)
- return rc;
- }
-
- /* De-assert the controller reset */
- reset_control_deassert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_power_on(hpriv->phys[i]);
- if (rc) {
- phy_exit(hpriv->phys[i]);
- return rc;
- }
- }
- }
+ rc = ceva_ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
(u8 *)&cevapriv->pp2c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
(u8 *)&cevapriv->pp2c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read OOB timing value for COMWAKE from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
(u8 *)&cevapriv->pp3c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
(u8 *)&cevapriv->pp3c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy BURST timing value from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-burst-params",
(u8 *)&cevapriv->pp4c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-burst-params",
(u8 *)&cevapriv->pp4c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy RETRY interval timing value from device-tree */
if (of_property_read_u16_array(np, "ceva,p0-retry-params",
(u16 *)&cevapriv->pp5c[0], 2) < 0) {
dev_warn(dev, "ceva,p0-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u16_array(np, "ceva,p1-retry-params",
(u16 *)&cevapriv->pp5c[1], 2) < 0) {
dev_warn(dev, "ceva,p1-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/*
@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
struct ahci_host_priv *hpriv = host->private_data;
int rc;
- rc = ahci_platform_enable_resources(hpriv);
+ rc = ceva_ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 09ed67772fae..be3412cdb22e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2001,6 +2001,33 @@ bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
return true;
}
+static bool ata_dev_power_is_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_CHK_POWER;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask) {
+ ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
+ err_mask);
+ /*
+ * Assume we are in standby mode so that we always force a
+ * spinup in ata_dev_power_set_active().
+ */
+ return false;
+ }
+
+ ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
+
+ /* Active or idle */
+ return tf.nsect == 0xff;
+}
+
/**
* ata_dev_power_set_standby - Set a device power mode to standby
* @dev: target device
@@ -2017,6 +2044,11 @@ void ata_dev_power_set_standby(struct ata_device *dev)
struct ata_taskfile tf;
unsigned int err_mask;
+ /* If the device is already sleeping or in standby, do nothing. */
+ if ((dev->flags & ATA_DFLAG_SLEEPING) ||
+ !ata_dev_power_is_active(dev))
+ return;
+
/*
* Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
* causing some drives to spin up and down again. For these, do nothing
@@ -2042,33 +2074,6 @@ void ata_dev_power_set_standby(struct ata_device *dev)
err_mask);
}
-static bool ata_dev_power_is_active(struct ata_device *dev)
-{
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- ata_tf_init(dev, &tf);
- tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
- tf.protocol = ATA_PROT_NODATA;
- tf.command = ATA_CMD_CHK_POWER;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- if (err_mask) {
- ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
- err_mask);
- /*
- * Assume we are in standby mode so that we always force a
- * spinup in ata_dev_power_set_active().
- */
- return false;
- }
-
- ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
-
- /* Active or idle */
- return tf.nsect == 0xff;
-}
-
/**
* ata_dev_power_set_active - Set a device power mode to active
* @dev: target device
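
With this move, ata_dev_power_set_standby() issues CHECK POWER MODE first and bails out if the drive is already spun down. The command reports the power state in the sector count field; as in the helper above, 0xff means active or idle and anything else is treated as not active. A tiny sketch of just that decode (other ACS state codes exist and are deliberately ignored here, as in the driver):

#include <stdbool.h>
#include <stdio.h>

/*
 * CHECK POWER MODE (ATA_CMD_CHK_POWER) returns the state in the sector
 * count field: 0xff = active/idle, 0x00 = standby. This mirrors only the
 * driver's test, not the full ACS state table.
 */
static bool ata_power_is_active(unsigned char nsect)
{
	return nsect == 0xff;
}

int main(void)
{
	printf("0xff -> %d\n", ata_power_is_active(0xff)); /* 1: active/idle */
	printf("0x00 -> %d\n", ata_power_is_active(0x00)); /* 0: standby */
	return 0;
}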
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index b6656c287175..0fb1934875f2 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -784,7 +784,7 @@ bool sata_lpm_ignore_phy_events(struct ata_link *link)
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
static const char *ata_lpm_policy_names[] = {
- [ATA_LPM_UNKNOWN] = "max_performance",
+ [ATA_LPM_UNKNOWN] = "keep_firmware_settings",
[ATA_LPM_MAX_POWER] = "max_performance",
[ATA_LPM_MED_POWER] = "medium_power",
[ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e327a0229dc1..e7f713cd70d3 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card)
vc->scq = alloc_scq(card, vc->class);
if (!vc->scq) {
printk("%s: can't get SCQ.\n", card->name);
+ kfree(card->vcs[0]);
+ card->vcs[0] = NULL;
return -ENOMEM;
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 018ac202de34..024b78a0cfc1 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -431,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (!raw_capacity)
- return 0;
-
if (val != CPUFREQ_CREATE_POLICY)
return 0;
@@ -450,9 +447,11 @@ init_cpu_capacity_callback(struct notifier_block *nb,
}
if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- schedule_work(&update_topology_flags_work);
- free_raw_capacity();
+ if (raw_capacity) {
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ free_raw_capacity();
+ }
pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work);
}
@@ -472,7 +471,7 @@ static int __init register_cpufreq_notifier(void)
* On ACPI-based systems skip registering cpufreq notifier as cpufreq
* information is not needed for cpu capacity initialization.
*/
- if (!acpi_disabled || !raw_capacity)
+ if (!acpi_disabled)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 14d46af40f9a..9828da9b933c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -125,7 +125,7 @@ static void __fwnode_link_del(struct fwnode_link *link)
*/
static void __fwnode_link_cycle(struct fwnode_link *link)
{
- pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ pr_debug("%pfwf: cycle: depends on %pfwf\n",
link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE;
}
@@ -284,10 +284,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false;
}
+#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE | \
+ DL_FLAG_MANAGED)
static inline bool device_link_flag_is_sync_state_only(u32 flags)
{
- return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+ return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
/**
@@ -1943,6 +1945,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
/* Termination condition. */
if (sup_dev == con) {
+ pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
@@ -1974,8 +1977,11 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
+ par_dev->fwnode);
ret = true;
+ }
if (!sup_dev)
goto out;
@@ -1991,6 +1997,8 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (__fw_devlink_relax_cycles(con,
dev_link->supplier->fwnode)) {
+ pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
+ dev_link->supplier->fwnode);
fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE;
ret = true;
@@ -2058,13 +2066,19 @@ static int fw_devlink_create_devlink(struct device *con,
/*
* SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So cycle detection isn't necessary and shouldn't be done.
+ * So, one might expect that cycle detection isn't necessary for them.
+ * However, if the device link was marked as SYNC_STATE_ONLY because
+ * it's part of a cycle, then we still need to do cycle detection. This
+ * is because the consumer and supplier might be part of multiple cycles
+ * and we need to detect all those cycles.
*/
- if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ if (!device_link_flag_is_sync_state_only(flags) ||
+ flags & DL_FLAG_CYCLE) {
device_links_write_lock();
if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link);
flags = fw_devlink_get_flags(link->flags);
+ pr_debug("----- cycle: end -----\n");
dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
sup_handle);
}
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 026bdcb45127..0d957c5f1bcc 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -9,6 +9,23 @@
#define BLOCK_TEST_SIZE 12
+static void get_changed_bytes(void *orig, void *new, size_t size)
+{
+ char *o = orig;
+ char *n = new;
+ int i;
+
+ get_random_bytes(new, size);
+
+ /*
+ * This could be nicer and more efficient but we shouldn't
+ * super care.
+ */
+ for (i = 0; i < size; i++)
+ while (n[i] == o[i])
+ get_random_bytes(&n[i], 1);
+}
+
static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
@@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- unsigned int val, val_test, val_last;
+ unsigned int val;
+ u16 val_test, val_last;
u16 val_array[BLOCK_TEST_SIZE];
config = raw_regmap_config;
@@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- u16 val[2];
+ u16 val[3];
u16 *hw_buf;
unsigned int rval;
int i;
@@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test)
hw_buf = (u16 *)data->vals;
- get_random_bytes(&val, sizeof(val));
+ get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
- if (config.val_format_endian == REGMAP_ENDIAN_BIG)
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- be16_to_cpu(val[0])));
- else
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- le16_to_cpu(val[0])));
+ KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
+ sizeof(u16) * 2));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
@@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test)
switch (i) {
case 2:
case 3:
- case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu(val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu(val[i - 2]));
}
break;
+ case 4:
+ KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
+ break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
+
+ /*
+ * The value written via _write() was translated by the core,
+ * translate the original copy for comparison purposes.
+ */
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG)
+ val[2] = cpu_to_be16(val[2]);
+ else
+ val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
@@ -1312,8 +1336,7 @@ static void raw_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
regmap_exit(map);
}
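
The new get_changed_bytes() helper guarantees the test data differs from the current hardware contents byte by byte, so the MEMNEQ/MEMEQ assertions in raw_sync() cannot pass by luck. A userspace rendering of the same loop, with rand() standing in for the kernel's get_random_bytes():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fill new_buf with random bytes, forcing every byte to differ from orig. */
static void get_changed_bytes(const void *orig, void *new_buf, size_t size)
{
	const unsigned char *o = orig;
	unsigned char *n = new_buf;

	for (size_t i = 0; i < size; i++) {
		do {
			n[i] = (unsigned char)rand();
		} while (n[i] == o[i]);
	}
}

int main(void)
{
	unsigned char before[4] = { 1, 2, 3, 4 }, after[4];

	srand(1);
	get_changed_bytes(before, after, sizeof(after));
	printf("differs: %d\n", memcmp(before, after, sizeof(after)) != 0);
	return 0;
}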
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index d2dbf8aaccb5..b1b47d88f5db 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
struct gendisk *gd;
mempool_t *mp;
struct blk_mq_tag_set *set;
+ sector_t ssize;
ulong flags;
int late = 0;
int err;
@@ -396,7 +397,7 @@ aoeblk_gdalloc(void *vp)
gd->minors = AOE_PARTITIONS;
gd->fops = &aoe_bdops;
gd->private_data = d;
- set_capacity(gd, d->ssize);
+ ssize = d->ssize;
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
d->aoemajor, d->aoeminor);
@@ -405,6 +406,8 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
+ set_capacity(gd, ssize);
+
err = device_add_disk(NULL, gd, aoe_attr_groups);
if (err)
goto out_disk_cleanup;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index a999b698b131..12b5d53ec856 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
- bool need_wakeup;
+ bool need_wakeup = false;
lockdep_assert_held(&rbd_dev->lock_rwsem);
spin_lock(&rbd_dev->lock_lists_lock);
- rbd_assert(!list_empty(&img_req->lock_item));
- list_del_init(&img_req->lock_item);
- need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
- list_empty(&rbd_dev->running_list));
+ if (!list_empty(&img_req->lock_item)) {
+ list_del_init(&img_req->lock_item);
+ need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+ list_empty(&rbd_dev->running_list));
+ }
spin_unlock(&rbd_dev->lock_lists_lock);
if (need_wakeup)
complete(&rbd_dev->releasing_wait);
@@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
return;
}
- list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
+ while (!list_empty(&rbd_dev->acquiring_list)) {
+ img_req = list_first_entry(&rbd_dev->acquiring_list,
+ struct rbd_img_request, lock_item);
mutex_lock(&img_req->state_mutex);
rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
+ if (!result)
+ list_move_tail(&img_req->lock_item,
+ &rbd_dev->running_list);
+ else
+ list_del_init(&img_req->lock_item);
rbd_img_schedule(img_req, result);
mutex_unlock(&img_req->state_mutex);
}
-
- list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
static bool locker_equal(const struct ceph_locker *lhs,
@@ -5326,7 +5332,7 @@ static void rbd_dev_release(struct device *dev)
if (need_put) {
destroy_workqueue(rbd_dev->task_wq);
- ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+ ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
}
rbd_dev_free(rbd_dev);
@@ -5402,9 +5408,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return NULL;
/* get an id and fill in device name */
- rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
- minor_to_rbd_dev_id(1 << MINORBITS),
- GFP_KERNEL);
+ rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
+ minor_to_rbd_dev_id(1 << MINORBITS) - 1,
+ GFP_KERNEL);
if (rbd_dev->dev_id < 0)
goto fail_rbd_dev;
@@ -5425,7 +5431,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return rbd_dev;
fail_dev_id:
- ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+ ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
rbd_dev_free(rbd_dev);
return NULL;
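
The ida_simple_get() call took an exclusive upper bound ("end"), while ida_alloc_max() takes an inclusive maximum, hence the new "- 1" in the conversion above. A toy allocator that makes the off-by-one visible (not the IDA API itself):

#include <stdio.h>

static int alloc_exclusive(int next, int end)  /* old style: id < end  */
{
	return next < end ? next : -1;
}

static int alloc_inclusive(int next, int max)  /* new style: id <= max */
{
	return next <= max ? next : -1;
}

int main(void)
{
	/* The same largest legal id (9), expressed both ways. */
	printf("%d\n", alloc_exclusive(9, 10)); /* 9 */
	printf("%d\n", alloc_inclusive(9, 9));  /* 9 */
	return 0;
}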
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5bf98fd6a651..2bf14a0e2815 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1593,14 +1593,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ /* Ensure no requests in virtqueues before deleting vqs. */
+ blk_mq_freeze_queue(vblk->disk->queue);
+
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
- blk_mq_quiesce_queue(vblk->disk->queue);
-
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@@ -1618,7 +1619,7 @@ static int virtblk_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
- blk_mq_unquiesce_queue(vblk->disk->queue);
+ blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index fdb0fae88d1c..b40b32fa7f1c 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
bt_dev_dbg(hdev, "QCA Patch config");
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
- cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ cmd, 0, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index a61757835695..9a7243d5db71 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -1417,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
kfree_skb(skb);
return 0;
@@ -2368,7 +2368,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
hdev->setup = bcm4377_hci_setup;
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
if (bcm4377->hw->broken_mws_transport_config)
set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
if (bcm4377->hw->broken_ext_scan)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 94b8c406f0c0..edd2a81b4d5e 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -1806,13 +1807,12 @@ static int qca_power_on(struct hci_dev *hdev)
static void hci_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+ bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
@@ -1904,7 +1904,17 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ /* Set BDA quirk bit for reading BDA value from fwnode property
+  * only if that property exists in the DT.
+  */
+ if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
+ } else {
+ bt_dev_dbg(hdev, "local-bd-address is not present in the devicetree so not setting quirk bit for BDA");
+ }
+
hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 6b5da73c8541..837bf9d51c6e 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -120,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
i++;
}
- if (i == 0 || i % 4)
+ if (i == 0)
goto err;
for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
index 57186c58dc84..1d7dd3d2c101 100644
--- a/drivers/cache/ax45mp_cache.c
+++ b/drivers/cache/ax45mp_cache.c
@@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
unsigned long line_size;
unsigned long flags;
+ if (unlikely(start == end))
+ return;
+
line_size = ax45mp_priv.ax45mp_cache_line_size;
start = start & (~(line_size - 1));
+ end = ((end + line_size - 1) & (~(line_size - 1)));
local_irq_save(flags);
ax45mp_cpu_dcache_wb_range(start, end);
local_irq_restore(flags);
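
The fix rounds the writeback range outward to whole cache lines, start down and end up, both against a power-of-two line size, so a partial line at either edge of the buffer is still written back. The same arithmetic as a standalone helper pair with a worked value:

#include <stdio.h>

static unsigned long align_down(unsigned long addr, unsigned long line)
{
	return addr & ~(line - 1);
}

static unsigned long align_up(unsigned long addr, unsigned long line)
{
	return (addr + line - 1) & ~(line - 1);
}

int main(void)
{
	unsigned long line = 64, start = 0x1010, end = 0x1070;

	/* Prints 0x1000..0x1080: both edges expanded to full lines. */
	printf("%#lx..%#lx\n", align_down(start, line), align_up(end, line));
	return 0;
}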
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 0964bb11657f..782993951fff 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -2475,7 +2475,7 @@ static const struct samsung_cmu_info misc_cmu_info __initconst = {
.nr_clk_ids = CLKS_NR_MISC,
.clk_regs = misc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(misc_clk_regs),
- .clk_name = "dout_cmu_misc_bus",
+ .clk_name = "bus",
};
/* ---- platform_driver ----------------------------------------------------- */
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 3d5e6d705fc6..44b19e696176 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -108,9 +108,8 @@ static inline void send_msg(struct cn_msg *msg)
filter_data[1] = 0;
}
- if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
- cn_filter, (void *)filter_data) == -ESRCH)
- atomic_set(&proc_event_num_listeners, 0);
+ cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
+ cn_filter, (void *)filter_data);
local_unlock(&local_event.lock);
}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1f6186475715..1791d37fbc53 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1232,14 +1232,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
cpudata->max_limit_perf);
-
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
value = READ_ONCE(cpudata->cppc_req_cached);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2ca70b0b5fdc..79619227ea51 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -529,6 +529,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
}
#endif /* CONFIG_ACPI_CPPC_LIB */
+static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
+ unsigned int relation)
+{
+ if (freq == cpu->pstate.turbo_freq)
+ return cpu->pstate.turbo_pstate;
+
+ if (freq == cpu->pstate.max_freq)
+ return cpu->pstate.max_pstate;
+
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ return freq / cpu->pstate.scaling;
+ case CPUFREQ_RELATION_C:
+ return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
+ }
+
+ return DIV_ROUND_UP(freq, cpu->pstate.scaling);
+}
+
+static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
+{
+ return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
+}
+
/**
* intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
* @cpu: Target CPU.
@@ -546,6 +570,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
int scaling = cpu->pstate.scaling;
+ int freq;
pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
@@ -559,16 +584,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
perf_ctl_scaling);
- cpu->pstate.max_pstate_physical =
- DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
- scaling);
+ freq = perf_ctl_max_phys * perf_ctl_scaling;
+ cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
- cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+ cpu->pstate.min_freq = freq;
/*
* Cast the min P-state value retrieved via pstate_funcs.get_min() to
* the effective range of HWP performance levels.
*/
- cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+ cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}
static inline void update_turbo_state(void)
@@ -2528,13 +2553,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
* abstract values to represent performance rather than pure ratios.
*/
if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
- int scaling = cpu->pstate.scaling;
int freq;
freq = max_policy_perf * perf_ctl_scaling;
- max_policy_perf = DIV_ROUND_UP(freq, scaling);
+ max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
freq = min_policy_perf * perf_ctl_scaling;
- min_policy_perf = DIV_ROUND_UP(freq, scaling);
+ min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
}
pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -2908,18 +2932,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
cpufreq_freq_transition_begin(policy, &freqs);
- switch (relation) {
- case CPUFREQ_RELATION_L:
- target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
- break;
- case CPUFREQ_RELATION_H:
- target_pstate = freqs.new / cpu->pstate.scaling;
- break;
- default:
- target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
- break;
- }
-
+ target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2937,7 +2950,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
update_turbo_state();
- target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2974,6 +2987,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
+ if (min_pstate > cpu->max_perf_ratio)
+ min_pstate = cpu->max_perf_ratio;
+
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;
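
intel_pstate_freq_to_hwp_rel() centralizes the three cpufreq rounding relations: RELATION_L picks the lowest P-state at or above the target frequency, RELATION_H the highest at or below, and RELATION_C the closest. A standalone version of the arithmetic (the enum names are local stand-ins for the CPUFREQ_RELATION_* constants):

#include <stdio.h>

enum relation { REL_L, REL_H, REL_C };

static int freq_to_pstate(int freq, int scaling, enum relation rel)
{
	switch (rel) {
	case REL_H:
		return freq / scaling;                   /* round down */
	case REL_C:
		return (freq + scaling / 2) / scaling;   /* round closest */
	case REL_L:
	default:
		return (freq + scaling - 1) / scaling;   /* round up */
	}
}

int main(void)
{
	int scaling = 100000, freq = 1550000;

	printf("L=%d H=%d C=%d\n",
	       freq_to_pstate(freq, scaling, REL_L),   /* 16 */
	       freq_to_pstate(freq, scaling, REL_H),   /* 15 */
	       freq_to_pstate(freq, scaling, REL_C));  /* 16 */
	return 0;
}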
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index dcf2b39e1048..1a3e6aafbdcc 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -316,31 +316,27 @@ static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
- const unsigned long end)
+static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
+ struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
- struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
- struct acpi_cedt_cfmws *cfmws;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
struct resource *res;
int rc;
- cfmws = (struct acpi_cedt_cfmws *) header;
-
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc) {
dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
- return 0;
+ return rc;
}
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
@@ -376,7 +372,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
- return 0;
+ return PTR_ERR(cxlrd);
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@@ -420,16 +416,7 @@ err_xormap:
put_device(&cxld->dev);
else
rc = cxl_decoder_autoremove(dev, cxld);
- if (rc) {
- dev_err(dev, "Failed to add decode range: %pr", res);
- return rc;
- }
- dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
- dev_name(&cxld->dev),
- phys_to_target_node(cxld->hpa_range.start),
- cxld->hpa_range.start, cxld->hpa_range.end);
-
- return 0;
+ return rc;
err_insert:
kfree(res->name);
@@ -438,6 +425,29 @@ err_name:
return -ENOMEM;
}
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
+ struct cxl_cfmws_context *ctx = arg;
+ struct device *dev = ctx->dev;
+ int rc;
+
+ rc = __cxl_parse_cfmws(cfmws, ctx);
+ if (rc)
+ dev_err(dev,
+ "Failed to add decode range: [%#llx - %#llx] (%d)\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1, rc);
+ else
+ dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
+ phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+
+ /* never fail cxl_acpi load for a single window failure */
+ return 0;
+}
+
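The refactor splits CFMWS parsing into __cxl_parse_cfmws(), which reports real error codes, and a thin subtable callback that logs the outcome in one place and always returns 0, so one bad window cannot fail the whole cxl_acpi load. A standalone sketch of that split, with hypothetical names and -22 standing in for -EINVAL:

#include <stdio.h>

static int parse_one_window(int base, int size)
{
	if (size <= 0)
		return -22; /* -EINVAL */
	return 0;
}

static int parse_window_cb(int base, int size)
{
	int rc = parse_one_window(base, size);

	if (rc)
		fprintf(stderr, "failed range [%#x - %#x] (%d)\n",
			base, base + size - 1, rc);
	/* never fail the scan for a single window failure */
	return 0;
}

int main(void)
{
	parse_window_cb(0x1000, 0);              /* logs, does not abort */
	return parse_window_cb(0x2000, 0x1000);  /* succeeds quietly */
}
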
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
struct device *dev)
{
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 6fe11546889f..08fd0baea7a0 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
return 0;
}
-static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
- struct list_head *list)
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+ struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf;
-
- dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
- if (!dpa_perf)
- return;
-
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
- list_add_tail(&dpa_perf->list, list);
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
@@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
dent->coord.read_latency, dent->coord.write_latency);
}
-static void free_perf_ents(void *data)
-{
- struct cxl_memdev_state *mds = data;
- struct cxl_dpa_perf *dpa_perf, *n;
- LIST_HEAD(discard);
-
- list_splice_tail_init(&mds->ram_perf_list, &discard);
- list_splice_tail_init(&mds->pmem_perf_list, &discard);
- list_for_each_entry_safe(dpa_perf, n, &discard, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
@@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
xa_for_each(dsmas_xa, index, dent) {
if (resource_size(&cxlds->ram_res) &&
range_contains(&ram_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->ram_perf_list);
+ update_perf_entry(dev, dent, &mds->ram_perf);
else if (resource_size(&cxlds->pmem_res) &&
range_contains(&pmem_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->pmem_perf_list);
+ update_perf_entry(dev, dent, &mds->pmem_perf);
else
dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
dent->dpa_range.start);
}
-
- devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
}
static int match_cxlrd_qos_class(struct device *dev, void *data)
@@ -293,24 +270,24 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
return 0;
}
-static void cxl_qos_match(struct cxl_port *root_port,
- struct list_head *work_list,
- struct list_head *discard_list)
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf, *n;
+ *dpa_perf = (struct cxl_dpa_perf) {
+ .qos_class = CXL_QOS_CLASS_INVALID,
+ };
+}
- list_for_each_entry_safe(dpa_perf, n, work_list, list) {
- int rc;
+static bool cxl_qos_match(struct cxl_port *root_port,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+ return false;
- if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
- return;
+ if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
+ match_cxlrd_qos_class))
+ return false;
- rc = device_for_each_child(&root_port->dev,
- (void *)&dpa_perf->qos_class,
- match_cxlrd_qos_class);
- if (!rc)
- list_move_tail(&dpa_perf->list, discard_list);
- }
+ return true;
}
static int match_cxlrd_hb(struct device *dev, void *data)
@@ -334,23 +311,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
return 0;
}
-static void discard_dpa_perf(struct list_head *list)
-{
- struct cxl_dpa_perf *dpa_perf, *n;
-
- list_for_each_entry_safe(dpa_perf, n, list, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
-
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- LIST_HEAD(__discard);
- struct list_head *discard __free(dpa_perf) = &__discard;
struct cxl_port *root_port;
int rc;
@@ -363,16 +327,17 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
root_port = &cxl_root->port;
/* Check that the QTG IDs are all sane between end device and root decoders */
- cxl_qos_match(root_port, &mds->ram_perf_list, discard);
- cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
+ if (!cxl_qos_match(root_port, &mds->ram_perf))
+ reset_dpa_perf(&mds->ram_perf);
+ if (!cxl_qos_match(root_port, &mds->pmem_perf))
+ reset_dpa_perf(&mds->pmem_perf);
/* Check to make sure that the device's host bridge is under a root decoder */
rc = device_for_each_child(&root_port->dev,
- (void *)cxlmd->endpoint->host_bridge,
- match_cxlrd_hb);
+ cxlmd->endpoint->host_bridge, match_cxlrd_hb);
if (!rc) {
- list_splice_tail_init(&mds->ram_perf_list, discard);
- list_splice_tail_init(&mds->pmem_perf_list, discard);
+ reset_dpa_perf(&mds->ram_perf);
+ reset_dpa_perf(&mds->pmem_perf);
}
return rc;
@@ -417,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_memdev_set_qos_class(cxlds, dsmas_xa);
cxl_qos_class_verify(cxlmd);
+ cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
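
reset_dpa_perf() reinitializes the now-embedded struct with a C99 compound literal, zeroing every field except the named qos_class sentinel, which avoids stale data without any list bookkeeping. The same idiom on a simplified struct:

#include <stdio.h>

struct dpa_perf {
	long start, end;
	int read_bw, write_bw;
	int qos_class;
};

#define QOS_CLASS_INVALID -1

/* Compound-literal reset: unnamed fields become zero. */
static void reset_dpa_perf(struct dpa_perf *p)
{
	*p = (struct dpa_perf) {
		.qos_class = QOS_CLASS_INVALID,
	};
}

int main(void)
{
	struct dpa_perf p = { 1, 2, 3, 4, 5 };

	reset_dpa_perf(&p);
	printf("%ld %d %d\n", p.start, p.read_bw, p.qos_class); /* 0 0 -1 */
	return 0;
}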
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 27166a411705..9adda4795eb7 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1391,8 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.reg_map.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- INIT_LIST_HEAD(&mds->ram_perf_list);
- INIT_LIST_HEAD(&mds->pmem_perf_list);
+ mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+ mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
return mds;
}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index dae8802ecdb0..d4e259f3a7e9 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static ssize_t pmem_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+ __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_pmem_attributes[] = {
&dev_attr_pmem_size.attr,
+ &dev_attr_pmem_qos_class.attr,
NULL,
};
+static ssize_t ram_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+ __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_ram_attributes[] = {
&dev_attr_ram_size.attr,
+ &dev_attr_ram_qos_class.attr,
NULL,
};
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
.is_visible = cxl_memdev_visible,
};
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_ram_qos_class.attr)
+ if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_ram_attribute_group = {
.name = "ram",
.attrs = cxl_memdev_ram_attributes,
+ .is_visible = cxl_ram_visible,
};
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_pmem_qos_class.attr)
+ if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_pmem_attribute_group = {
.name = "pmem",
.attrs = cxl_memdev_pmem_attributes,
+ .is_visible = cxl_pmem_visible,
};
static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
NULL,
};
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
+{
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+
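The qos_class attributes move into per-partition groups gated by is_visible callbacks, and cxl_memdev_update_perf() re-evaluates them once CDAT data lands. A toy model of that sysfs visibility contract, with simplified stand-in types rather than the kernel API: the callback returns 0 to hide an attribute and its mode to expose it.

#include <stdio.h>

struct attribute { const char *name; unsigned short mode; };

#define QOS_INVALID -1
static int ram_qos_class = QOS_INVALID;

static unsigned short ram_visible(const struct attribute *a)
{
	if (ram_qos_class == QOS_INVALID)
		return 0;        /* hidden until perf data is known */
	return a->mode;
}

int main(void)
{
	struct attribute qos = { "qos_class", 0444 };

	printf("before: %o\n", (unsigned)ram_visible(&qos)); /* 0: hidden */
	ram_qos_class = 2;           /* CDAT parsed, group re-evaluated */
	printf("after:  %o\n", (unsigned)ram_visible(&qos)); /* 444 */
	return 0;
}
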
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
.release = cxl_memdev_release,
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 6c9c8d92f8f7..e9e6c81ce034 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -477,9 +477,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
- if (!allowed) {
- cxl_set_mem_enable(cxlds, 0);
- info->mem_enabled = 0;
+ if (!allowed && info->mem_enabled) {
+ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+ return -ENXIO;
}
/*
@@ -932,11 +932,21 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
void cxl_cor_error_detected(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct device *dev = &cxlds->cxlmd->dev;
+
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return;
+ }
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
- cxl_handle_endpoint_cor_ras(cxlds);
+ cxl_handle_endpoint_cor_ras(cxlds);
+ }
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
@@ -948,16 +958,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
struct device *dev = &cxlmd->dev;
bool ue;
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+ /*
+ * A frozen channel indicates an impending reset which is fatal to
+ * CXL.mem operation, and will likely crash the system. On the off
+ * chance the situation is recoverable dump the status of the RAS
+ * capability registers and bounce the active state of the memdev.
+ */
+ ue = cxl_handle_endpoint_ras(cxlds);
+ }
- /*
- * A frozen channel indicates an impending reset which is fatal to
- * CXL.mem operation, and will likely crash the system. On the off
- * chance the situation is recoverable dump the status of the RAS
- * capability registers and bounce the active state of the memdev.
- */
- ue = cxl_handle_endpoint_ras(cxlds);
switch (state) {
case pci_channel_io_normal:
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 0f05692bfec3..4c7fd2d5cccb 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -525,7 +525,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_region_params *p = &cxlr->params;
struct resource *res;
- u32 remainder = 0;
+ u64 remainder = 0;
lockdep_assert_held_write(&cxl_region_rwsem);
@@ -545,7 +545,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
(cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
return -ENXIO;
- div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+ div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
if (remainder)
return -EINVAL;
@@ -730,12 +730,17 @@ static int match_auto_decoder(struct device *dev, void *data)
return 0;
}
-static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_region *cxlr)
+static struct cxl_decoder *
+cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
+ if (port == cxled_to_port(cxled))
+ return &cxled->cxld;
+
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
@@ -753,8 +758,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
return to_cxl_decoder(dev);
}
-static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
- struct cxl_region *cxlr)
+static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
+ struct cxl_decoder *cxld_iter = rr->decoder;
+
+ /*
+ * Allow the out of order assembly of auto-discovered regions.
+ * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
+ * in HPA order. Confirm that the decoder with the lesser HPA
+ * starting address has the lesser id.
+ */
+ dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
+ dev_name(&cxld->dev), cxld->id,
+ dev_name(&cxld_iter->dev), cxld_iter->id);
+
+ if (cxld_iter->id > cxld->id)
+ return true;
+
+ return false;
+}
+
+static struct cxl_region_ref *
+alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -764,16 +792,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
xa_for_each(&port->regions, index, iter) {
struct cxl_region_params *ip = &iter->region->params;
- if (!ip->res)
+ if (!ip->res || ip->res->start < p->res->start)
continue;
- if (ip->res->start > p->res->start) {
- dev_dbg(&cxlr->dev,
- "%s: HPA order violation %s:%pr vs %pr\n",
- dev_name(&port->dev),
- dev_name(&iter->region->dev), ip->res, p->res);
- return ERR_PTR(-EBUSY);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
+ if (auto_order_ok(port, iter->region, cxld))
+ continue;
}
+ dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+
+ return ERR_PTR(-EBUSY);
}
cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
@@ -853,10 +886,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
{
struct cxl_decoder *cxld;
- if (port == cxled_to_port(cxled))
- cxld = &cxled->cxld;
- else
- cxld = cxl_region_find_decoder(port, cxlr);
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (!cxld) {
dev_dbg(&cxlr->dev, "%s: no decoder available\n",
dev_name(&port->dev));
@@ -953,7 +983,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr);
+ cxl_rr = alloc_region_ref(port, cxlr, cxled);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 89445435303a..bdf117a33744 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -338,7 +338,7 @@ TRACE_EVENT(cxl_general_media,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;
/* General Media */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -425,7 +425,7 @@ TRACE_EVENT(cxl_dram,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_DRAM_UUID;
/* DRAM */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -573,7 +573,7 @@ TRACE_EVENT(cxl_memory_module,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;
/* Memory Module Event */
__entry->event_type = rec->event_type;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6017c0c57b4..003feebab79b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -880,6 +880,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 5303d6942b88..20fb3b35e89e 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -395,13 +395,11 @@ enum cxl_devtype {
/**
* struct cxl_dpa_perf - DPA performance property entry
- * @list - list entry
* @dpa_range - range for DPA address
* @coord - QoS performance data (i.e. latency, bandwidth)
* @qos_class - QoS Class cookies
*/
struct cxl_dpa_perf {
- struct list_head list;
struct range dpa_range;
struct access_coordinate coord;
int qos_class;
@@ -471,8 +469,8 @@ struct cxl_dev_state {
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf_list: performance data entries matched to RAM
- * @pmem_perf_list: performance data entries matched to PMEM
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -494,8 +492,8 @@ struct cxl_memdev_state {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
- struct list_head ram_perf_list;
- struct list_head pmem_perf_list;
+ struct cxl_dpa_perf ram_perf;
+ struct cxl_dpa_perf pmem_perf;
struct cxl_event_state event;
struct cxl_poison_state poison;
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index c5c9d8e0d88d..0c79d9ce877c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -215,52 +215,6 @@ static ssize_t trigger_poison_list_store(struct device *dev,
}
static DEVICE_ATTR_WO(trigger_poison_list);
-static ssize_t ram_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->ram_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_ram_qos_class =
- __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
-
-static ssize_t pmem_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->pmem_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_pmem_qos_class =
- __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
-
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
@@ -272,21 +226,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
mds->poison.enabled_cmds))
return 0;
- if (a == &dev_attr_pmem_qos_class.attr)
- if (list_empty(&mds->pmem_perf_list))
- return 0;
-
- if (a == &dev_attr_ram_qos_class.attr)
- if (list_empty(&mds->ram_perf_list))
- return 0;
-
return a->mode;
}
static struct attribute *cxl_mem_attrs[] = {
&dev_attr_trigger_poison_list.attr,
- &dev_attr_ram_qos_class.attr,
- &dev_attr_pmem_qos_class.attr,
NULL
};
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 4fd1f207c84e..2ff361e756d6 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -382,7 +382,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
return rc;
}
-static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
@@ -441,7 +441,7 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
/* background command interrupts are optional */
- if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
return 0;
msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
@@ -588,7 +588,7 @@ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
}
-static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
{
int nvecs;
@@ -605,9 +605,9 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nvecs < 1) {
dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
- return -ENXIO;
+ return false;
}
- return 0;
+ return true;
}
static irqreturn_t cxl_event_thread(int irq, void *id)
@@ -743,7 +743,7 @@ static bool cxl_event_int_is_fw(u8 setting)
}
static int cxl_event_config(struct pci_host_bridge *host_bridge,
- struct cxl_memdev_state *mds)
+ struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_event_interrupt_policy policy;
int rc;
@@ -755,6 +755,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
if (!host_bridge->native_cxl_error)
return 0;
+ if (!irq_avail) {
+ dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n");
+ return 0;
+ }
+
rc = cxl_mem_alloc_event_buf(mds);
if (rc)
return rc;
@@ -789,6 +794,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
int i, rc, pmu_count;
+ bool irq_avail;
/*
* Double check the anonymous union trickery in struct cxl_regs
@@ -846,11 +852,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
- rc = cxl_alloc_irq_vectors(pdev);
- if (rc)
- return rc;
+ irq_avail = cxl_alloc_irq_vectors(pdev);
- rc = cxl_pci_setup_mailbox(mds);
+ rc = cxl_pci_setup_mailbox(mds, irq_avail);
if (rc)
return rc;
@@ -909,7 +913,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- rc = cxl_event_config(host_bridge, mds);
+ rc = cxl_event_config(host_bridge, mds, irq_avail);
if (rc)
return rc;
@@ -970,61 +974,6 @@ static struct pci_driver cxl_pci_driver = {
},
};
-#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
-static void cxl_cper_event_call(enum cxl_event_type ev_type,
- struct cxl_cper_event_rec *rec)
-{
- struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
- struct pci_dev *pdev __free(pci_dev_put) = NULL;
- enum cxl_event_log_type log_type;
- struct cxl_dev_state *cxlds;
- unsigned int devfn;
- u32 hdr_flags;
-
- devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
- pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
- device_id->bus_num, devfn);
- if (!pdev)
- return;
-
- guard(pci_dev)(pdev);
- if (pdev->driver != &cxl_pci_driver)
- return;
-
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
- /* Fabricate a log type */
- hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
- log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
-
- cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
- &uuid_null, &rec->event);
-}
-
-static int __init cxl_pci_driver_init(void)
-{
- int rc;
-
- rc = cxl_cper_register_callback(cxl_cper_event_call);
- if (rc)
- return rc;
-
- rc = pci_register_driver(&cxl_pci_driver);
- if (rc)
- cxl_cper_unregister_callback(cxl_cper_event_call);
-
- return rc;
-}
-
-static void __exit cxl_pci_driver_exit(void)
-{
- pci_unregister_driver(&cxl_pci_driver);
- cxl_cper_unregister_callback(cxl_cper_event_call);
-}
-
-module_init(cxl_pci_driver_init);
-module_exit(cxl_pci_driver_exit);
+module_pci_driver(cxl_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
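
module_pci_driver() replaces the hand-rolled init/exit pair now that the CPER callback registration has moved out; as far as I know it expands via module_driver() to an init that only calls pci_register_driver() and an exit that only calls pci_unregister_driver(). A userspace analog of the generated boilerplate (the local macro and stubs are illustrative, not the kernel's definitions):

#include <stdio.h>

struct pci_driver { const char *name; };

static int pci_register_driver(struct pci_driver *d)
{
	printf("register %s\n", d->name);
	return 0;
}

static void pci_unregister_driver(struct pci_driver *d)
{
	printf("unregister %s\n", d->name);
}

/* Rough shape of what the real macro generates for module load/unload. */
#define module_pci_driver(drv)				\
	static int drv##_init(void)			\
	{ return pci_register_driver(&drv); }		\
	static void drv##_exit(void)			\
	{ pci_unregister_driver(&drv); }

static struct pci_driver cxl_pci_driver = { .name = "cxl_pci" };
module_pci_driver(cxl_pci_driver)

int main(void)
{
	if (cxl_pci_driver_init())
		return 1;
	cxl_pci_driver_exit();
	return 0;
}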
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index ee899f8e6721..4a63567e93ba 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -168,10 +168,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
if (vmf->pgoff > buffer->pagecount)
return VM_FAULT_SIGBUS;
- vmf->page = buffer->pages[vmf->pgoff];
- get_page(vmf->page);
-
- return 0;
+ return vmf_insert_pfn(vma, vmf->address, page_to_pfn(buffer->pages[vmf->pgoff]));
}
static const struct vm_operations_struct dma_heap_vm_ops = {
@@ -185,6 +182,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
vma->vm_ops = &dma_heap_vm_ops;
vma->vm_private_data = buffer;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fb89ecbf0cc5..40052d1bd0b5 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -222,8 +222,14 @@ struct atdma_sg {
* @vd: pointer to the virtual dma descriptor.
* @atchan: pointer to the atmel dma channel.
* @total_len: total transaction byte count
- * @sg_len: number of sg entries.
+ * @sglen: number of sg entries.
* @sg: array of sgs.
+ * @boundary: number of transfers to perform before the automatic address increment operation
+ * @dst_hole: value to add to the destination address when the boundary has been reached
+ * @src_hole: value to add to the source address when the boundary has been reached
+ * @memset_buffer: buffer used for the memset operation
+ * @memset_paddr: physical address of the buffer used for the memset operation
+ * @memset_vaddr: virtual address of the buffer used for the memset operation
*/
struct at_desc {
struct virt_dma_desc vd;
@@ -245,7 +251,10 @@ struct at_desc {
/*-- Channels --------------------------------------------------------*/
/**
- * atc_status - information bits stored in channel status flag
+ * enum atc_status - information bits stored in channel status flag
+ *
+ * @ATC_IS_PAUSED: If the channel is paused
+ * @ATC_IS_CYCLIC: If the channel is cyclic
*
* Manipulated with atomic operations.
*/
@@ -282,7 +291,6 @@ struct at_dma_chan {
u32 save_cfg;
u32 save_dscr;
struct dma_slave_config dma_sconfig;
- bool cyclic;
struct at_desc *desc;
};
@@ -328,12 +336,12 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @dma_device: dmaengine dma_device object members
- * @atdma_devtype: identifier of DMA controller compatibility
- * @ch_regs: memory mapped register base
+ * @regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
 * @all_chan_mask: all channels available in a mask
* @lli_pool: hw lli table
+ * @memset_pool: hw memset pool
* @chan: channels table to store at_dma_chan structures
*/
struct at_dma {
@@ -626,6 +634,9 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
/**
* atc_get_llis_residue - Get residue for a hardware linked list transfer
+ * @atchan: pointer to an atmel hdmac channel.
+ * @desc: pointer to the descriptor for which the residue is calculated.
+ * @residue: residue to be set to dma_tx_state.
*
* Calculate the residue by removing the length of the Linked List Item (LLI)
* already transferred from the total length. To get the current LLI we can use
@@ -661,10 +672,8 @@ static inline u32 atc_calc_bytes_left(u32 current_len, u32 ctrla)
 * two DSCR values are different, we read the CTRLA and then the DSCR again,
 * until two consecutive reads of DSCR are equal or the maximum number of
 * trials is reached. This algorithm is very unlikely to fail to find a
 * stable value for DSCR.
- * @atchan: pointer to an atmel hdmac channel.
- * @desc: pointer to the descriptor for which the residue is calculated.
- * @residue: residue to be set to dma_tx_state.
- * Returns 0 on success, -errno otherwise.
+ *
+ * Returns: %0 on success, -errno otherwise.
*/
static int atc_get_llis_residue(struct at_dma_chan *atchan,
struct at_desc *desc, u32 *residue)
@@ -731,7 +740,8 @@ static int atc_get_llis_residue(struct at_dma_chan *atchan,
* @chan: DMA channel
* @cookie: transaction identifier to check status of
* @residue: residue to be updated.
- * Return 0 on success, -errono otherwise.
+ *
+ * Return: %0 on success, -errno otherwise.
*/
static int atc_get_residue(struct dma_chan *chan, dma_cookie_t cookie,
u32 *residue)
@@ -1710,7 +1720,7 @@ static void atc_issue_pending(struct dma_chan *chan)
* atc_alloc_chan_resources - allocate resources for DMA channel
* @chan: allocate descriptor resources for this channel
*
- * return - the number of allocated descriptors
+ * Return: the number of allocated descriptors
*/
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index b38786f0ad79..b75fdaffad9a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+ * over different buses. Ensure LL-data reaches the memory before the
+ * doorbell register is toggled by issuing the dummy-read from the remote
+ * LL memory, in the hope that the MRd TLP will return only after the
+ * last MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
}
+
+ dw_edma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
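
The dummy read added above is an instance of the generic posted-write flush pattern on PCIe: MWr TLPs are posted, while an MRd TLP is non-posted and will not complete until the preceding writes to the same destination have landed. A minimal sketch of the ordering, with hypothetical ll_base and db pointers standing in for the driver's linked-list region and doorbell register:

#include <linux/io.h>

/* Hypothetical helper: flush posted LL writes before ringing a doorbell. */
static void example_flush_then_ring(void __iomem *ll_base, void __iomem *db)
{
	writel(0x1, ll_base);	/* posted MWr: linked-list payload */
	(void)readl(ll_base);	/* non-posted MRd: completes after the MWr */
	writel(0x1, db);	/* engine now observes a consistent list */
}
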
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
index 00b735a0202a..10e8f0715114 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
- u32 num_ch = 0;
- int id;
-
- for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
- if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
- num_ch++;
- }
-
- if (num_ch > HDMA_V0_MAX_NR_CH)
- num_ch = HDMA_V0_MAX_NR_CH;
-
- return (u16)num_ch;
+ /*
+ * The HDMA IP has no way to know the number of hardware channels
+ * available, so we set it to the maximum and let the platform
+ * set the right number of channels.
+ */
+ return HDMA_V0_MAX_NR_CH;
}
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+ * over different buses. Ensure LL-data reaches the memory before the
+ * doorbell register is toggled by issuing the dummy-read from the remote
+ * LL memory, in the hope that the MRd TLP will return only after the
+ * last MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Interrupt enable&unmask - done, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+ HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
/* Channel control */
SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Set consumer cycle */
SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+ dw_hdma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
index a974abdf8aaf..eab5fd7177e5 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
@@ -15,7 +15,7 @@
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
#define HDMA_V0_ABORT_INT_MASK BIT(2)
#define HDMA_V0_STOP_INT_MASK BIT(0)
#define HDMA_V0_LINKLIST_EN BIT(0)
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
index 7958ac33e36c..5a8061a307cd 100644
--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -38,15 +38,17 @@ static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
if (!dpaa2_chan->fd_pool)
goto err;
- dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
- sizeof(struct dpaa2_fl_entry),
- sizeof(struct dpaa2_fl_entry), 0);
+ dpaa2_chan->fl_pool =
+ dma_pool_create("fl_pool", dev,
+ sizeof(struct dpaa2_fl_entry) * 3,
+ sizeof(struct dpaa2_fl_entry), 0);
+
if (!dpaa2_chan->fl_pool)
goto err_fd;
dpaa2_chan->sdd_pool =
dma_pool_create("sdd_pool", dev,
- sizeof(struct dpaa2_qdma_sd_d),
+ sizeof(struct dpaa2_qdma_sd_d) * 2,
sizeof(struct dpaa2_qdma_sd_d), 0);
if (!dpaa2_chan->sdd_pool)
goto err_fl;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b53f46245c37..793f1a7ad5e3 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (fsl_chan->is_multi_fifo) {
/* set mloff to support multiple fifo */
burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_addr_width : cfg->dst_addr_width;
+ cfg->src_maxburst : cfg->dst_maxburst;
nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
/* enable DMLOE/SMLOE */
if (cfg->direction == DMA_MEM_TO_DEV) {
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index bb5221158a77..f5e216b157c7 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -30,8 +30,9 @@
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 45cc419b1b4a..d36e28b9c767 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -10,6 +10,7 @@
*/
#include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
DMAENGINE_ALIGN_32_BYTES;
 /* For the worst case of 'nbytes = 1', take CITER as the max_seg_size */
- dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+ FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
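
FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK) extracts the mask from itself, which evaluates to the largest value the 15-bit CITER/BITER field can encode (0x7fff), replacing the hard-coded 0x3fff that undershot the hardware limit. A sketch of the identity, with an illustrative function name:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* FIELD_GET(m, m) shifts the mask down by its own low bit, yielding
 * the maximum encodable field value. */
static unsigned long example_max_citer(void)
{
	return FIELD_GET(GENMASK(14, 0), GENMASK(14, 0));	/* 0x7fff */
}
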
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index a1d0aa63142a..5005e138fc23 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
u8 __reserved1[2];
u8 cfg8b_w1;
} __packed;
+ struct {
+ __le32 __reserved2;
+ __le32 cmd;
+ } __packed;
__le64 data;
};
} __packed;
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
dma_addr_t dst, dma_addr_t src, u32 len)
{
- u32 cmd;
struct fsl_qdma_format *sdf, *ddf;
struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
/* This entry is the last entry. */
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- sdf->data = QDMA_SDDF_CMD(cmd);
-
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
- ddf->data = QDMA_SDDF_CMD(cmd);
+ sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF);
+
+ ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
}
/*
@@ -514,11 +515,11 @@ static struct fsl_qdma_queue
queue_temp = queue_head + i + (j * queue_num);
queue_temp->cq =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- queue_size[i],
- &queue_temp->bus_addr,
- GFP_KERNEL);
+ dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ &queue_temp->bus_addr,
+ GFP_KERNEL);
if (!queue_temp->cq)
return NULL;
queue_temp->block_base = fsl_qdma->block_base +
@@ -563,15 +564,14 @@ static struct fsl_qdma_queue
/*
* Buffer for queue command
*/
- status_head->cq = dma_alloc_coherent(&pdev->dev,
- sizeof(struct fsl_qdma_format) *
- status_size,
- &status_head->bus_addr,
- GFP_KERNEL);
- if (!status_head->cq) {
- devm_kfree(&pdev->dev, status_head);
+ status_head->cq = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr,
+ GFP_KERNEL);
+ if (!status_head->cq)
return NULL;
- }
+
status_head->n_cq = status_size;
status_head->virt_head = status_head->cq;
status_head->virt_tail = status_head->cq;
@@ -625,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
- void *block,
 void __iomem *block,
int id)
{
bool duplicate;
@@ -1197,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
- if (ret)
- return ret;
-
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1239,16 +1235,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
- ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
- dev_err(&pdev->dev,
- "Can't register NXP Layerscape qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
- ret = fsl_qdma_reg_init(fsl_qdma);
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
@@ -1268,8 +1267,6 @@ static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
static void fsl_qdma_remove(struct platform_device *pdev)
{
- int i;
- struct fsl_qdma_queue *status;
struct device_node *np = pdev->dev.of_node;
struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
@@ -1277,12 +1274,6 @@ static void fsl_qdma_remove(struct platform_device *pdev)
fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_qdma->dma_dev);
-
- for (i = 0; i < fsl_qdma->block_number; i++) {
- status = fsl_qdma->status[i];
- dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
- status->n_cq, status->cq, status->bus_addr);
- }
}
static const struct of_device_id fsl_qdma_dt_ids[] = {
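
The probe reordering above follows the usual rule for DMA drivers: program the hardware first, only then request interrupts, and register with the framework last, so neither an early IRQ nor an early client can observe uninitialized state. A sketch of the resulting order (the function names mirror the driver; error unwinding is elided since the allocations are device-managed):

static int example_probe_order(struct platform_device *pdev,
			       struct fsl_qdma_engine *fsl_qdma)
{
	int ret;

	ret = fsl_qdma_reg_init(fsl_qdma);	/* 1. quiesce and program HW */
	if (ret)
		return ret;

	ret = fsl_qdma_irq_init(pdev, fsl_qdma);	/* 2. only now take IRQs */
	if (ret)
		return ret;

	/* 3. expose the controller only once it can service requests */
	return dma_async_device_register(&fsl_qdma->dma_dev);
}
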
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 77f8885cf407..e5a94a93a3cc 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
spin_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
- h = evl->head;
+ h = status.head;
size = evl->size;
while (h != t) {
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index 9cfbd9b14c4c..f3f25ee676f3 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
spin_lock(&evl->lock);
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
evl_size = evl->size;
seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 47de3f93ff1e..d0f5db6cf1ed 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -300,7 +300,6 @@ struct idxd_evl {
unsigned int log_size;
/* The number of entries in the event log. */
u16 size;
- u16 head;
unsigned long *bmap;
bool batch_fail[IDXD_MAX_BATCH_IDENT];
};
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 14df1f1347a8..4954adc6bb60 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
static int idxd_init_evl(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ unsigned int evl_cache_size;
struct idxd_evl *evl;
+ const char *idxd_name;
if (idxd->hw.gen_cap.evl_support == 0)
return 0;
@@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
spin_lock_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
- idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
- sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
- 0, 0, NULL);
+ idxd_name = dev_name(idxd_confdev(idxd));
+ evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
+ /*
+ * Since the completion record in evl_cache will be copied to user
+ * space when handling a completion record page fault, the cache
+ * must be created suitable for user copy.
+ */
+ idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
+ 0, 0, 0, evl_cache_size,
+ NULL);
if (!idxd->evl_cache) {
kfree(evl);
return -ENOMEM;
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index c8a0aa874b11..348aa21389a9 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
size = idxd->evl->size;
while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
h = (h + 1) % size;
}
- evl->head = h;
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
spin_unlock(&evl->lock);
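
All three idxd hunks make the same change: head is taken from the EVLSTATUS register snapshot that also supplies tail, instead of the software copy carried by the removed evl->head field, so the two indices can never come from different points in time. A hedged sketch of the pattern, using a simplified stand-in for the driver's status union:

/* Simplified, illustrative stand-in for the driver's EVLSTATUS union. */
union example_evl_status {
	u64 bits;
	struct {
		u64 head : 16;
		u64 tail : 16;
		u64 rsvd : 32;
	};
};

static void example_walk_evl(void __iomem *reg, u16 size)
{
	union example_evl_status s;
	u16 h, t;

	s.bits = ioread64(reg);	/* one snapshot ... */
	h = s.head;		/* ... supplies both indices */
	t = s.tail;
	while (h != t) {
		/* process entry h here */
		h = (h + 1) % size;
	}
}
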
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index 1aa65e5de0f3..f79240734807 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
- dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index f1f920861fa9..5f8d2e93ff3f 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2404,6 +2404,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
ecc);
if (ret) {
@@ -2420,6 +2425,11 @@ static int edma_probe(struct platform_device *pdev)
if (irq > 0) {
irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
dev_name(dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err_disable_pm;
+ }
+
ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
ecc);
if (ret) {
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 2841a539c264..6400d06588a2 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -3968,6 +3968,7 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
{
struct udma_chan *uc = to_udma_chan(&vc->chan);
struct udma_desc *d;
+ u8 status;
if (!vd)
return;
@@ -3977,12 +3978,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
if (d->metadata_size)
udma_fetch_epib(uc, d);
- /* Provide residue information for the client */
if (result) {
void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
if (cppi5_desc_get_type(desc_vaddr) ==
CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
+ /* Provide residue information for the client */
result->residue = d->residue -
cppi5_hdesc_get_pktlen(desc_vaddr);
if (result->residue)
@@ -3991,7 +3992,12 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
result->result = DMA_TRANS_NOERROR;
} else {
result->residue = 0;
- result->result = DMA_TRANS_NOERROR;
+ /* Propagate TR Response errors to the client */
+ status = d->hwdesc[0].tr_resp_base->status;
+ if (status)
+ result->result = DMA_TRANS_ABORTED;
+ else
+ result->result = DMA_TRANS_NOERROR;
}
}
}
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 1eca8cc271f8..241db366b2c7 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -29,8 +29,6 @@ static u32 dpll_pin_xa_id;
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
-#define ASSERT_PIN_REGISTERED(p) \
- WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
struct list_head list;
@@ -44,6 +42,11 @@ struct dpll_pin_registration {
void *priv;
};
+struct dpll_pin *netdev_dpll_pin(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->dpll_pin);
+}
+
struct dpll_device *dpll_device_get_by_id(int id)
{
if (xa_get_mark(&dpll_device_xa, id, DPLL_REGISTERED))
@@ -425,6 +428,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
}
EXPORT_SYMBOL_GPL(dpll_device_unregister);
+static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
+{
+ kfree(prop->package_label);
+ kfree(prop->panel_label);
+ kfree(prop->board_label);
+ kfree(prop->freq_supported);
+}
+
+static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
+ struct dpll_pin_properties *dst)
+{
+ memcpy(dst, src, sizeof(*dst));
+ if (src->freq_supported && src->freq_supported_num) {
+ size_t freq_size = src->freq_supported_num *
+ sizeof(*src->freq_supported);
+ dst->freq_supported = kmemdup(src->freq_supported,
+ freq_size, GFP_KERNEL);
+		if (!dst->freq_supported)
+ return -ENOMEM;
+ }
+ if (src->board_label) {
+ dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
+ if (!dst->board_label)
+ goto err_board_label;
+ }
+ if (src->panel_label) {
+ dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
+ if (!dst->panel_label)
+ goto err_panel_label;
+ }
+ if (src->package_label) {
+ dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
+ if (!dst->package_label)
+ goto err_package_label;
+ }
+
+ return 0;
+
+err_package_label:
+ kfree(dst->panel_label);
+err_panel_label:
+ kfree(dst->board_label);
+err_board_label:
+ kfree(dst->freq_supported);
+ return -ENOMEM;
+}
+
static struct dpll_pin *
dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
const struct dpll_pin_properties *prop)
@@ -441,20 +491,24 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
prop->type > DPLL_PIN_TYPE_MAX)) {
ret = -EINVAL;
- goto err;
+ goto err_pin_prop;
}
- pin->prop = prop;
+ ret = dpll_pin_prop_dup(prop, &pin->prop);
+ if (ret)
+ goto err_pin_prop;
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
if (ret)
- goto err;
+ goto err_xa_alloc;
return pin;
-err:
+err_xa_alloc:
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
+ dpll_pin_prop_free(&pin->prop);
+err_pin_prop:
kfree(pin);
return ERR_PTR(ret);
}
@@ -514,7 +568,8 @@ void dpll_pin_put(struct dpll_pin *pin)
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
xa_erase(&dpll_pin_xa, pin->id);
- kfree(pin);
+ dpll_pin_prop_free(&pin->prop);
+ kfree_rcu(pin, rcu);
}
mutex_unlock(&dpll_lock);
}
@@ -564,8 +619,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
WARN_ON(!ops->state_on_dpll_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
- if (ASSERT_DPLL_REGISTERED(dpll))
- return -EINVAL;
mutex_lock(&dpll_lock);
if (WARN_ON(!(dpll->module == pin->module &&
@@ -636,15 +689,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
unsigned long i, stop;
int ret;
- if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
+ if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
return -EINVAL;
if (WARN_ON(!ops) ||
WARN_ON(!ops->state_on_pin_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
- if (ASSERT_PIN_REGISTERED(parent))
- return -EINVAL;
mutex_lock(&dpll_lock);
ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
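
The deep copy introduced by dpll_pin_prop_dup() means a registrant no longer has to keep its properties structure alive for the pin's lifetime; a caller pattern like the following, with a hypothetical label, becomes safe:

static struct dpll_pin *example_get_pin(u64 clock_id, u32 idx,
					struct module *mod)
{
	struct dpll_pin_properties prop = {
		.board_label = "example-pin",	/* hypothetical label */
		.type = DPLL_PIN_TYPE_GNSS,
	};

	/* prop has automatic storage; dpll_pin_get() duplicates it now. */
	return dpll_pin_get(clock_id, idx, mod, &prop);
}
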
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 5585873c5c1b..2b6d8ef1cdf3 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -44,9 +44,10 @@ struct dpll_device {
* @module: module of creator
 * @dpll_refs: hold references to dplls the pin was registered with
 * @parent_refs: hold references to parent pins the pin was registered with
- * @prop: pointer to pin properties given by registerer
+ * @prop: pin properties copied from the registerer
* @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
+ * @rcu: rcu_head for kfree_rcu()
**/
struct dpll_pin {
u32 id;
@@ -55,8 +56,9 @@ struct dpll_pin {
struct module *module;
struct xarray dpll_refs;
struct xarray parent_refs;
- const struct dpll_pin_properties *prop;
+ struct dpll_pin_properties prop;
refcount_t refcount;
+ struct rcu_head rcu;
};
/**
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 3370dbddb86b..4ca9ad16cd95 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -303,17 +303,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
DPLL_A_PIN_PAD))
return -EMSGSIZE;
- for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
+ for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
if (!nest)
return -EMSGSIZE;
- freq = pin->prop->freq_supported[fs].min;
+ freq = pin->prop.freq_supported[fs].min;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
return -EMSGSIZE;
}
- freq = pin->prop->freq_supported[fs].max;
+ freq = pin->prop.freq_supported[fs].max;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
@@ -329,9 +329,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
- for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
- if (freq >= pin->prop->freq_supported[fs].min &&
- freq <= pin->prop->freq_supported[fs].max)
+ for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
+ if (freq >= pin->prop.freq_supported[fs].min &&
+ freq <= pin->prop.freq_supported[fs].max)
return true;
return false;
}
@@ -421,7 +421,7 @@ static int
dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
struct netlink_ext_ack *extack)
{
- const struct dpll_pin_properties *prop = pin->prop;
+ const struct dpll_pin_properties *prop = &pin->prop;
struct dpll_pin_ref *ref;
int ret;
@@ -553,6 +553,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+ struct dpll_pin_ref *par_ref;
+ unsigned long i;
+
+ if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+ return false;
+ xa_for_each(&pin->parent_refs, i, par_ref)
+ if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+ DPLL_REGISTERED))
+ return true;
+ xa_for_each(&pin->dpll_refs, i, par_ref)
+ if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+ DPLL_REGISTERED))
+ return true;
+ return false;
+}
+
/**
* dpll_device_change_ntf - notify that the dpll device has been changed
* @dpll: registered dpll pointer
@@ -579,7 +597,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
int ret = -ENOMEM;
void *hdr;
- if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
+ if (!dpll_pin_available(pin))
return -ENODEV;
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
@@ -717,7 +735,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@@ -753,7 +771,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@@ -780,7 +798,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "prio changing is not allowed");
return -EOPNOTSUPP;
}
@@ -808,7 +826,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
int ret;
if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
- pin->prop->capabilities)) {
+ pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "direction changing is not allowed");
return -EOPNOTSUPP;
}
@@ -838,8 +856,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
int ret;
phase_adj = nla_get_s32(phase_adj_attr);
- if (phase_adj > pin->prop->phase_range.max ||
- phase_adj < pin->prop->phase_range.min) {
+ if (phase_adj > pin->prop.phase_range.max ||
+ phase_adj < pin->prop.phase_range.min) {
NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
"phase adjust value not supported");
return -EINVAL;
@@ -1023,7 +1041,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
unsigned long i;
xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
- prop = pin->prop;
+ prop = &pin->prop;
cid_match = clock_id ? pin->clock_id == clock_id : true;
mod_match = mod_name_attr && module_name(pin->module) ?
!nla_strcmp(mod_name_attr,
@@ -1130,6 +1148,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
+ if (!dpll_pin_available(pin)) {
+ nlmsg_free(msg);
+ return -ENODEV;
+ }
ret = dpll_msg_add_pin_handle(msg, pin);
if (ret) {
nlmsg_free(msg);
@@ -1177,8 +1199,11 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
+ mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
ctx->idx) {
+ if (!dpll_pin_available(pin))
+ continue;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
&dpll_nl_family, NLM_F_MULTI,
@@ -1194,6 +1219,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
+ mutex_unlock(&dpll_lock);
+
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@@ -1349,6 +1376,7 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
+ mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED,
ctx->idx) {
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
@@ -1365,6 +1393,8 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
+ mutex_unlock(&dpll_lock);
+
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@@ -1415,20 +1445,6 @@ dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
mutex_unlock(&dpll_lock);
}
-int dpll_lock_dumpit(struct netlink_callback *cb)
-{
- mutex_lock(&dpll_lock);
-
- return 0;
-}
-
-int dpll_unlock_dumpit(struct netlink_callback *cb)
-{
- mutex_unlock(&dpll_lock);
-
- return 0;
-}
-
int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
@@ -1441,7 +1457,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
}
info->user_ptr[0] = xa_load(&dpll_pin_xa,
nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
- if (!info->user_ptr[0]) {
+ if (!info->user_ptr[0] ||
+ !dpll_pin_available(info->user_ptr[0])) {
NL_SET_ERR_MSG(info->extack, "pin not found");
ret = -ENODEV;
goto unlock_dev;
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index eaee5be7aa64..1e95f5397cfc 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -95,9 +95,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_DEVICE_GET,
- .start = dpll_lock_dumpit,
.dumpit = dpll_nl_device_get_dumpit,
- .done = dpll_unlock_dumpit,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
},
{
@@ -129,9 +127,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_PIN_GET,
- .start = dpll_lock_dumpit,
.dumpit = dpll_nl_pin_get_dumpit,
- .done = dpll_unlock_dumpit,
.policy = dpll_pin_get_dump_nl_policy,
.maxattr = DPLL_A_PIN_ID,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index 92d4c9c4f788..f491262bee4f 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -30,8 +30,6 @@ dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
void
dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
-int dpll_lock_dumpit(struct netlink_callback *cb);
-int dpll_unlock_dumpit(struct netlink_callback *cb);
int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info);
int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 6ac5ff20a2fe..401a77e3b5fa 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
*/
card->bm_generation = generation;
- if (root_device == NULL) {
+ if (card->gap_count == 0) {
+ /*
+ * If self IDs have inconsistent gap counts, do a
+ * bus reset ASAP. The config rom read might never
+ * complete, so don't wait for it. However, still
+ * send a PHY configuration packet prior to the
+ * bus reset. The PHY configuration packet might
+ * fail, but 1394-2008 8.4.5.2 explicitly permits
+ * it in this case, so it should be safe to try.
+ */
+ new_root_id = local_id;
+ /*
+ * We must always send a bus reset if the gap count
+ * is inconsistent, so bypass the 5-reset limit.
+ */
+ card->bm_retries = 0;
+ } else if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
@@ -484,7 +500,19 @@ static void bm_work(struct work_struct *work)
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- reset_bus(card, true);
+ /*
+ * Where possible, use a short bus reset to minimize
+ * disruption to isochronous transfers. But in the event
+ * of a gap count inconsistency, use a long bus reset.
+ *
+ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+ * may set different gap counts after a bus reset. On a mixed
+ * 1394/1394a bus, a short bus reset can get doubled. Some
+ * nodes may treat the double reset as one bus reset and others
+ * may treat it as two, causing a gap count inconsistency
+ * again. Using a long bus reset prevents this.
+ */
+ reset_bus(card, card->gap_count != 0);
/* Will allocate broadcast channel after the reset. */
goto out;
}
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 0547253d16fe..7d3346b3a2bf 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -118,10 +118,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
* @buf: where to put the string
* @size: size of @buf, in bytes
*
- * The string is taken from a minimal ASCII text descriptor leaf after
- * the immediate entry with @key. The string is zero-terminated.
- * An overlong string is silently truncated such that it and the
- * zero byte fit into @size.
+ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the
+ * @key. The string is zero-terminated. An overlong string is silently truncated such that it
+ * and the zero byte fit into @size.
*
* Returns strlen(buf) or a negative error code.
*/
@@ -368,8 +367,17 @@ static ssize_t show_text_leaf(struct device *dev,
for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) {
int result = fw_csr_string(directories[i], attr->key, buf, bufsize);
// Detected.
- if (result >= 0)
+ if (result >= 0) {
ret = result;
+ } else if (i == 0 && attr->key == CSR_VENDOR) {
+			// Sony DVMC-DA1 has a configuration ROM in which the descriptor leaf entry
+			// in the root directory follows the directory entry for vendor ID
+			// instead of the immediate value for vendor ID.
+ result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf,
+ bufsize);
+ if (result >= 0)
+ ret = result;
+ }
}
if (ret >= 0) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 6146b2927d5c..f2556a8e9401 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -107,12 +107,12 @@ struct ffa_drv_info {
struct work_struct notif_pcpu_work;
struct work_struct irq_work;
struct xarray partition_info;
- unsigned int partition_count;
DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
struct mutex notify_lock; /* lock to protect notifier hashtable */
};
static struct ffa_drv_info *drv_info;
+static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -733,6 +733,11 @@ static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
void *cb_data;
partition = xa_load(&drv_info->partition_info, part_id);
+ if (!partition) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ return;
+ }
+
read_lock(&partition->rw_lock);
callback = partition->callback;
cb_data = partition->cb_data;
@@ -915,6 +920,11 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
return -EOPNOTSUPP;
partition = xa_load(&drv_info->partition_info, part_id);
+ if (!partition) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ return -EINVAL;
+ }
+
write_lock(&partition->rw_lock);
cb_valid = !!partition->callback;
@@ -1186,9 +1196,9 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
kfree(pbuf);
}
-static void ffa_setup_partitions(void)
+static int ffa_setup_partitions(void)
{
- int count, idx;
+ int count, idx, ret;
uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
@@ -1197,7 +1207,7 @@ static void ffa_setup_partitions(void)
count = ffa_partition_probe(&uuid_null, &pbuf);
if (count <= 0) {
pr_info("%s: No partitions found, error %d\n", __func__, count);
- return;
+ return -EINVAL;
}
xa_init(&drv_info->partition_info);
@@ -1226,40 +1236,53 @@ static void ffa_setup_partitions(void)
ffa_device_unregister(ffa_dev);
continue;
}
- xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
+ rwlock_init(&info->rw_lock);
+ ret = xa_insert(&drv_info->partition_info, tpbuf->id,
+ info, GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
+ __func__, tpbuf->id, ret);
+ ffa_device_unregister(ffa_dev);
+ kfree(info);
+ }
}
- drv_info->partition_count = count;
kfree(pbuf);
/* Allocate for the host */
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
- xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
- drv_info->partition_count++;
+ if (!info) {
+ pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
+ __func__, drv_info->vm_id);
+ /* Already registered devices are freed on bus_exit */
+ ffa_partitions_cleanup();
+ return -ENOMEM;
+ }
+
+ rwlock_init(&info->rw_lock);
+ ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
+ info, GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
+ __func__, drv_info->vm_id, ret);
+ kfree(info);
+ /* Already registered devices are freed on bus_exit */
+ ffa_partitions_cleanup();
+ }
+
+ return ret;
}
static void ffa_partitions_cleanup(void)
{
- struct ffa_dev_part_info **info;
- int idx, count = drv_info->partition_count;
-
- if (!count)
- return;
-
- info = kcalloc(count, sizeof(*info), GFP_KERNEL);
- if (!info)
- return;
-
- xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
- count, XA_PRESENT);
+ struct ffa_dev_part_info *info;
+ unsigned long idx;
- for (idx = 0; idx < count; idx++)
- kfree(info[idx]);
- kfree(info);
+ xa_for_each(&drv_info->partition_info, idx, info) {
+ xa_erase(&drv_info->partition_info, idx);
+ kfree(info);
+ }
- drv_info->partition_count = 0;
xa_destroy(&drv_info->partition_info);
}
@@ -1508,7 +1531,11 @@ static int __init ffa_init(void)
ffa_notifications_setup();
- ffa_setup_partitions();
+ ret = ffa_setup_partitions();
+ if (ret) {
+ pr_err("failed to setup partitions\n");
+ goto cleanup_notifs;
+ }
ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
drv_info, true);
@@ -1516,6 +1543,9 @@ static int __init ffa_init(void)
pr_info("Failed to register driver sched callback %d\n", ret);
return 0;
+
+cleanup_notifs:
+ ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
@@ -1535,7 +1565,6 @@ static void __exit ffa_exit(void)
ffa_rxtx_unmap(drv_info->vm_id);
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
- xa_destroy(&drv_info->partition_info);
kfree(drv_info);
arm_ffa_bus_exit();
}
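
The xa_store() to xa_insert() conversions in this series (here, and in the SCMI perf and raw-mode hunks below) all exploit the same semantic difference: xa_store() silently replaces an existing entry, leaking the old one, while xa_insert() returns -EBUSY on a duplicate index so the caller can detect the collision and unwind. A minimal sketch with a hypothetical tracking helper:

#include <linux/xarray.h>
#include <linux/slab.h>

static int example_track(struct xarray *xa, unsigned long id, void *info)
{
	int ret = xa_insert(xa, id, info, GFP_KERNEL);

	if (ret)	/* -EBUSY if id is already present, or -ENOMEM */
		kfree(info);
	return ret;
}
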
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index c0644558042a..e2050adbf85c 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -13,7 +13,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
@@ -954,8 +954,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
scmi_clock_describe_rates_get(ph, clkid, clk);
}
- if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
- PROTOCOL_REV_MINOR(version) >= 0x1) {
+ if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
cinfo->clock_config_set = scmi_clock_config_set_v2;
cinfo->clock_config_get = scmi_clock_config_get_v2;
} else {
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index c46dc5215af7..00b165d1f502 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
/* declarations for message passing transports */
struct scmi_msg_payld;
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 19246ed1f01f..b8d470417e8f 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
+ /*
+ * An A2P IRQ is NOT valid when received while the platform still has
+	 * ownership of the channel, because the platform first releases
+ * the SMT channel and then sends the completion interrupt.
+ *
+ * This addresses a possible race condition in which a spurious IRQ from
+ * a previous timed-out reply which arrived late could be wrongly
+ * associated with the next pending transaction.
+ */
+ if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
+ dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
+ return;
+ }
+
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 8ea2a7b3d35d..211e8e0aef2c 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -350,8 +350,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
}
static inline void
-process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
- unsigned int loop_idx,
+process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
@@ -362,10 +362,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
 /* Note that PERF v4 always reports five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
if (dom->level_indexing_mode) {
+ int ret;
+
opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
- xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
- xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+ GFP_KERNEL);
+ if (ret)
+ dev_warn(dev,
+ "Failed to add opps_by_idx at %d - ret:%d\n",
+ opp->level_index, ret);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret)
+ dev_warn(dev,
+ "Failed to add opps_by_lvl at %d - ret:%d\n",
+ opp->perf, ret);
+
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
}
@@ -382,7 +395,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
process_response_opp(opp, st->loop_idx, response);
else
- process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
+ process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
response);
p->perf_dom->opp_count++;
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index 0493aa3c12bf..350573518503 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
int i;
for (i = 0; i < num_chans; i++) {
- void *xret;
struct scmi_raw_queue *q;
q = scmi_raw_queue_init(raw);
@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
goto err_xa;
}
- xret = xa_store(&raw->chans_q, channels[i], q,
+ ret = xa_insert(&raw->chans_q, channels[i], q,
GFP_KERNEL);
- if (xa_err(xret)) {
+ if (ret) {
dev_err(dev,
"Fail to allocate Raw queue 0x%02X\n",
channels[i]);
- ret = xa_err(xret);
goto err_xa;
}
}
@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
dev = raw->handle->dev;
q = scmi_raw_queue_select(raw, idx,
SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+ if (!q) {
+ dev_warn(dev,
+ "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
+ idx, chan_id);
+ return;
+ }
/*
* Grab the msg_q_lock upfront to avoid a possible race between
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index 87b4f4d35f06..8bf495bcad09 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -10,7 +10,7 @@
#include <linux/processor.h>
#include <linux/types.h>
-#include <asm-generic/bug.h>
+#include <linux/bug.h>
#include "common.h"
@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
+
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
+{
+ return (ioread32(&shmem->channel_status) &
+ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 83f5bb57fa4c..83092d93f36a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 3e8d4b51a814..97bafb5f7038 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 35c37f667781..9b3884ff81e6 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx,
}
}
+struct ignore_section {
+ guid_t guid;
+ const char *name;
+};
+
+static const struct ignore_section ignore_sections[] = {
+ { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" },
+ { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" },
+ { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" },
+};
+
static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
@@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) {
+ if (guid_equal(sec_type, &ignore_sections[i].guid)) {
+ printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name);
+ return;
+ }
+ }
+
if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index d4987d013080..a00e07b853f2 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -144,15 +144,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
- * Special purpose memory is 'soft reserved', which means it
- * is set aside initially, but can be hotplugged back in or
- * be assigned to the dax driver after boot.
- */
- if (efi_soft_reserve_enabled() &&
- (md->attribute & EFI_MEMORY_SP))
- return false;
-
- /*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
* them as System RAM if they can be mapped writeback cacheable.
@@ -196,6 +187,16 @@ static __init void reserve_regions(void)
size = npages << PAGE_SHIFT;
if (is_memory(md)) {
+ /*
+ * Special purpose memory is 'soft reserved', which
+ * means it is set aside initially. Don't add a memblock
+ * for it now so that it can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ continue;
+
early_init_dt_add_memory_arch(paddr, size);
if (!is_usable_memory(md))
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 06964a3c130f..73f4810f6db3 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE
+cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax
cflags-$(CONFIG_LOONGARCH) += -fpie
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
@@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# exist.
STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX
# For LoongArch, keep all the symbols in .init section and make sure that no
# absolute symbols references exist.
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
index 6b83c492c3b8..31928bd87e0f 100644
--- a/drivers/firmware/efi/libstub/alignedmem.c
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -14,6 +14,7 @@
* @max: the address that the last allocated memory page shall not
* exceed
* @align: minimum alignment of the base of the allocation
+ * @memory_type: the type of memory to allocate
*
* Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
* to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 212687c30d79..c04b82ea40f2 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed,
- int memory_type, unsigned long alloc_limit);
+ int memory_type, unsigned long alloc_min,
+ unsigned long alloc_max);
efi_status_t efi_random_get_seed(void);
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 62d63f7a2645..1a9808012abd 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed,
- EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 674a064b8f7a..4e96a855fdf4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -17,7 +17,7 @@
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
unsigned long align_shift,
- u64 alloc_limit)
+ u64 alloc_min, u64 alloc_max)
{
unsigned long align = 1UL << align_shift;
u64 first_slot, last_slot, region_end;
@@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
return 0;
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
- alloc_limit);
+ alloc_max);
if (region_end < size)
return 0;
- first_slot = round_up(md->phys_addr, align);
+ first_slot = round_up(max(md->phys_addr, alloc_min), align);
last_slot = round_down(region_end - size + 1, align);
if (first_slot > last_slot)
@@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long *addr,
unsigned long random_seed,
int memory_type,
- unsigned long alloc_limit)
+ unsigned long alloc_min,
+ unsigned long alloc_max)
{
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
@@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
- slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
+ slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
+ alloc_max);
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
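
With the new alloc_min parameter, a memory descriptor contributes randomization slots only from max(md->phys_addr, alloc_min) upward, and a region lying entirely below the floor now yields zero slots. A worked check with hypothetical numbers:

#include <linux/minmax.h>
#include <linux/sizes.h>

static u64 example_first_slot(void)
{
	u64 phys_addr = 0x800000, alloc_min = 0x1000000;

	/* round_up(max(0x800000, 0x1000000), 2M) == 0x1000000 */
	return round_up(max(phys_addr, alloc_min), SZ_2M);
}
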
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 0d510c9a06a4..99429bc4b0c7 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
-void efi_adjust_memory_range_protection(unsigned long start,
- unsigned long size)
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size)
{
efi_status_t status;
efi_gcd_memory_space_desc_t desc;
@@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start,
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
- efi_call_proto(memattr, clear_memory_attributes, rounded_start,
- rounded_end - rounded_start, EFI_MEMORY_XP);
- return;
+ status = efi_call_proto(memattr, clear_memory_attributes,
+ rounded_start,
+ rounded_end - rounded_start,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
+ return status;
}
if (efi_dxe_table == NULL)
- return;
+ return EFI_SUCCESS;
/*
* Don't modify memory region attributes, they are
@@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start,
status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
if (status != EFI_SUCCESS)
- return;
+ break;
next = desc.base_address + desc.length;
@@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start,
unprotect_start,
unprotect_start + unprotect_size,
status);
+ break;
}
}
+ return EFI_SUCCESS;
}
static void setup_unaccepted_memory(void)
@@ -793,6 +799,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
seed[0], EFI_LOADER_CODE,
+ LOAD_PHYSICAL_ADDR,
EFI_X86_KERNEL_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
return status;
@@ -805,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
- efi_adjust_memory_range_protection(addr, kernel_total_size);
-
- return EFI_SUCCESS;
+ return efi_adjust_memory_range_protection(addr, kernel_total_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
index 37c5a36b9d8c..1c20e99a6494 100644
--- a/drivers/firmware/efi/libstub/x86-stub.h
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -5,8 +5,8 @@
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
-void efi_adjust_memory_range_protection(unsigned long start,
- unsigned long size);
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size);
#ifdef CONFIG_X86_64
efi_status_t efi_setup_5level_paging(void);
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index bdb17eac0cb4..1ceace956758 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
}
status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
- seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
goto free_cmdline;
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index 09525fb5c240..01f0f90ea418 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))
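The int-to-u64 change above is an overflow fix: md->num_pages is 64-bit, so the shifted byte size is computed correctly but was then truncated when stored in an int, wrapping for any Specific Purpose memory region of 2 GiB or more. A minimal illustration with a made-up page count (the truncation of an out-of-range value to int is implementation-defined, which is exactly the problem):

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12

int main(void)
{
	uint64_t num_pages = 0x100000;             /* hypothetical 4 GiB region */

	int bad       = num_pages << EFI_PAGE_SHIFT; /* truncated to 32 bits: 0 */
	uint64_t good = num_pages << EFI_PAGE_SHIFT; /* 0x100000000 preserved   */

	printf("int: %d, u64: %llu\n", bad, (unsigned long long)good);
	return 0;
}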
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 81f5f62e34fc..682e417be5a3 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -167,7 +167,7 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
u32 *response_msg;
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+ response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
GFP_KERNEL);
if (!response_msg)
return -ENOMEM;
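The mpfs-auto-update fix above is the classic sizeof(pointer) versus sizeof(element) slip: sizeof(response_msg) is the size of a pointer (8 bytes on 64-bit), not of the 4-byte element, so the allocation was silently sized wrong. A self-contained demonstration (uint32_t stands in for the driver's u32):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t *response_msg = NULL;

	/* sizeof(response_msg) is the pointer size (8 on 64-bit)... */
	printf("sizeof(response_msg)  = %zu\n", sizeof(response_msg));
	/* ...while sizeof(*response_msg) is the element size (4). */
	printf("sizeof(*response_msg) = %zu\n", sizeof(*response_msg));
	return 0;
}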
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 82fcfd29bc4d..3c197db42c9d 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -128,4 +128,4 @@ unlock_mutex:
}
/* must execute after PCI subsystem for EFI quirks */
-subsys_initcall_sync(sysfb_init);
+device_initcall(sysfb_init);
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e00c33310517..753e7be039e4 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;
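The gpio-74x164 reorder above is a probe sequencing fix: the output-enable line used to be asserted before the shift register contents were written, briefly driving stale state onto the pins. A minimal sketch of the safe ordering, with entirely hypothetical context and helper names:

#include <stdio.h>

/* Hypothetical device context and helpers; stand-ins for the real
 * SPI/regmap plumbing. */
struct dev_ctx { int regs_valid; int outputs_on; };

static int configure_registers(struct dev_ctx *c) { c->regs_valid = 1; return 0; }
static void enable_outputs(struct dev_ctx *c)     { c->outputs_on = 1; }

static int probe_sketch(struct dev_ctx *c)
{
	int ret = configure_registers(c); /* write known-good contents first */
	if (ret)
		return ret;

	enable_outputs(c);                /* only now drive the pins */
	return 0;
}

int main(void)
{
	struct dev_ctx c = { 0, 0 };
	return probe_sketch(&c);
}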
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index be7f2fa5aa7b..806b88d8dfb7 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -330,20 +330,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
state = sprd_eic_get(chip, offset);
- if (state)
+ if (state) {
sprd_eic_update(chip, offset,
SPRD_EIC_DBNC_IEV, 0);
- else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IC, 1);
+ } else {
sprd_eic_update(chip, offset,
SPRD_EIC_DBNC_IEV, 1);
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_DBNC_IC, 1);
+ }
break;
default:
return -ENOTSUPP;
@@ -355,20 +362,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
state = sprd_eic_get(chip, offset);
- if (state)
+ if (state) {
sprd_eic_update(chip, offset,
SPRD_EIC_LATCH_INTPOL, 0);
- else
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTCLR, 1);
+ } else {
sprd_eic_update(chip, offset,
SPRD_EIC_LATCH_INTPOL, 1);
+ sprd_eic_update(chip, offset,
+ SPRD_EIC_LATCH_INTCLR, 1);
+ }
break;
default:
return -ENOTSUPP;
@@ -382,29 +396,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
default:
@@ -417,29 +436,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_BOTH:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+ sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
irq_set_handler_locked(data, handle_level_irq);
break;
default:
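Every branch of the three switch statements above gains the same side effect: when the trigger type changes, the pending-status bit (DBNC_IC, LATCH_INTCLR, ASYNC/SYNC_INTCLR) is cleared so an event latched under the old configuration cannot fire as a spurious interrupt under the new one. The pattern, factored into a runnable sketch with stand-in state instead of the real register helpers:

#include <stdio.h>

/* Stand-ins for the driver's chip state and register update helpers. */
struct eic_chip { int pol; int pending; };

static void eic_update_pol(struct eic_chip *c, int v) { c->pol = v; }
static void eic_clear_pending(struct eic_chip *c)     { c->pending = 0; }

/* On every trigger-type change: program the new polarity, then clear
 * any status latched under the old configuration. */
static void set_type_sketch(struct eic_chip *c, int level_high)
{
	eic_update_pol(c, level_high ? 1 : 0);
	eic_clear_pending(c);
}

int main(void)
{
	struct eic_chip c = { .pol = 0, .pending = 1 }; /* stale event latched */

	set_type_sketch(&c, 1);
	printf("pol=%d pending=%d\n", c.pol, c.pending);
	return 0;
}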
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 88066826d8e5..cd3e9657cc36 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1651,6 +1651,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "INT33FC:00@3",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 0.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 44c8f5743a24..75be4a3ca7f8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -968,11 +968,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_free_hogs;
ret = gpiochip_irqchip_init_hw(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_remove_irqchip_mask;
ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
@@ -997,23 +997,23 @@ err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(gc);
-err_remove_acpi_chip:
+err_free_hogs:
+ gpiochip_free_hogs(gc);
acpi_gpiochip_remove(gc);
+ gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
- gpiochip_free_hogs(gc);
of_gpiochip_remove(gc);
err_free_gpiochip_mask:
- gpiochip_remove_pin_ranges(gc);
gpiochip_free_valid_mask(gc);
+err_remove_from_list:
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&gdev->list);
+ spin_unlock_irqrestore(&gpio_lock, flags);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
gpio_device_put(gdev);
goto err_print_message;
}
-err_remove_from_list:
- spin_lock_irqsave(&gpio_lock, flags);
- list_del(&gdev->list);
- spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
kfree_const(gdev->label);
err_free_descs:
@@ -2042,6 +2042,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return -ENOTSUPP;
+#endif
+
return pinctrl_gpio_set_config(gc, offset, config);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_config);
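The guard added to gpiochip_generic_config() deserves a note: when no pin ranges are registered there is no pinctrl backend behind the chip, and returning -ENOTSUPP tells callers the config is simply unsupported rather than letting the request fail deeper in pinctrl. A self-contained model of the guard (the list_empty() and types here are trivial stand-ins, not the kernel's):

#include <stdio.h>

#define ENOTSUPP 524 /* kernel-internal "operation not supported" */

struct list_head { struct list_head *next; };

static int list_empty(const struct list_head *h) { return h->next == h; }

struct gpio_dev { struct list_head pin_ranges; };

static int backend_set_config(void) { return 0; } /* stand-in for pinctrl */

static int generic_config_sketch(struct gpio_dev *gdev)
{
	/* No pin ranges => no pinctrl backend => not supported. */
	if (list_empty(&gdev->pin_ranges))
		return -ENOTSUPP;

	return backend_set_config();
}

int main(void)
{
	struct gpio_dev gdev = { .pin_ranges = { &gdev.pin_ranges } };

	printf("%d\n", generic_config_sketch(&gdev)); /* -524: no ranges */
	return 0;
}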
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2520db0b776e..c7edba18a6f0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -199,7 +199,7 @@ config DRM_TTM
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
- depends on DRM && KUNIT && MMU
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
@@ -207,7 +207,8 @@ config DRM_TTM_KUNIT_TEST
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
- developers.
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3d8a48f46b01..79827a6dcd7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -200,6 +200,7 @@ extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
+extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
@@ -1078,6 +1079,8 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ /* indicate amdgpu suspension status */
+ bool suspend_complete;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1547,9 +1550,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 2deebece810e..7099ff9cf8c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1519,4 +1519,22 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
+/**
+ * amdgpu_choose_low_power_state
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Choose the target low power state for the GPU
+ */
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
+{
+ if (adev->in_runpm)
+ return;
+
+ if (amdgpu_acpi_is_s0ix_active(adev))
+ adev->in_s0ix = true;
+ else if (amdgpu_acpi_is_s3_active(adev))
+ adev->in_s3 = true;
+}
+
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 77e263660288..41db030ddc4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -141,11 +141,31 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
static const struct drm_client_funcs kfd_client_funcs = {
.unregister = drm_client_release,
};
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
+ &kfd_client_funcs);
+ if (ret) {
+ dev_err(adev->dev, "Failed to init DRM client: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_client_register(&adev->kfd.client);
+
+ return 0;
+}
+
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
int last_valid_bit;
- int ret;
amdgpu_amdkfd_gpuvm_init_mem_limits();
@@ -164,12 +184,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.enable_mes = adev->enable_mes,
};
- ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", &kfd_client_funcs);
- if (ret) {
- dev_err(adev->dev, "Failed to init DRM client: %d\n", ret);
- return;
- }
-
/* this is going to have a few of the MSBs set that we need to
* clear
*/
@@ -208,10 +222,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
&gpu_resources);
- if (adev->kfd.init_complete)
- drm_client_register(&adev->kfd.client);
- else
- drm_client_release(&adev->kfd.client);
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index f262b9d89541..27c61c535e29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm,
struct svm_range_bo *svm_bo);
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
@@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 899e31e3a5e8..3a3f3ce09f00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
/* stop scheduler and drain ring. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f183d7faeeec..231fd927dcfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2085,21 +2085,35 @@ out:
return ret;
}
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
{
struct kfd_mem_attachment *entry;
struct amdgpu_vm *vm;
+ int ret;
vm = drm_priv_to_vm(drm_priv);
mutex_lock(&mem->lock);
+ ret = amdgpu_bo_reserve(mem->bo, true);
+ if (ret)
+ goto out;
+
list_for_each_entry(entry, &mem->attachments, list) {
- if (entry->bo_va->base.vm == vm)
- kfd_mem_dmaunmap_attachment(mem, entry);
+ if (entry->bo_va->base.vm != vm)
+ continue;
+ if (entry->bo_va->base.bo->tbo.ttm &&
+ !entry->bo_va->base.bo->tbo.ttm->sg)
+ continue;
+
+ kfd_mem_dmaunmap_attachment(mem, entry);
}
+ amdgpu_bo_unreserve(mem->bo);
+out:
mutex_unlock(&mem->lock);
+
+ return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index e485dd3357c6..1afbb2e932c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1678,7 +1678,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_stop(&ring->sched);
}
@@ -1694,7 +1694,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_wqueue_start(&ring->sched);
}
@@ -1916,8 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
ring = adev->rings[val];
- if (!ring || !ring->funcs->preempt_ib ||
- !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring) ||
+ !ring->funcs->preempt_ib)
return -EINVAL;
/* the last preemption failed */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b158d27d0a71..94bdb5fa6ebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4121,23 +4121,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
}
} else {
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 0):
- case IP_VERSION(13, 0, 7):
- case IP_VERSION(13, 0, 10):
- r = psp_gpu_reset(adev);
- break;
- default:
- tmp = amdgpu_reset_method;
- /* It should do a default reset when loading or reloading the driver,
- * regardless of the module parameter reset_method.
- */
- amdgpu_reset_method = AMD_RESET_METHOD_NONE;
- r = amdgpu_asic_reset(adev);
- amdgpu_reset_method = tmp;
- break;
- }
-
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
if (r) {
dev_err(adev->dev, "asic reset on init failed\n");
goto failed;
@@ -4524,13 +4514,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
+ amdgpu_choose_low_power_state(adev);
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- return r;
+ goto unprepare;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
@@ -4539,10 +4531,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
if (r)
- return r;
+ goto unprepare;
}
return 0;
+
+unprepare:
+ adev->in_s0ix = adev->in_s3 = false;
+
+ return r;
}
/**
@@ -4579,7 +4576,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
- flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);
@@ -5031,7 +5027,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
spin_lock(&ring->sched.job_list_lock);
@@ -5170,7 +5166,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
/* Clear job fence from fence drv to avoid force_completion
@@ -5637,7 +5633,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
@@ -5706,7 +5702,7 @@ skip_hw_reset:
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
@@ -6061,7 +6057,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_stop(&ring->sched, NULL);
@@ -6189,7 +6185,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !drm_sched_wqueue_ready(&ring->sched))
+ if (!amdgpu_ring_sched_ready(ring))
continue;
drm_sched_start(&ring->sched, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index cc69005f5b46..586f4d03039d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -211,6 +211,7 @@ int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
+int amdgpu_damage_clips = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -860,6 +861,18 @@ MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (defau
module_param_named(backlight, amdgpu_backlight, bint, 0444);
/**
+ * DOC: damageclips (int)
+ * Enable or disable damage clips support. If damage clips support is disabled,
+ * we will force full frame updates, irrespective of what user space sends to
+ * us.
+ *
+ * Defaults to -1 (enabled unless a PSR-SU display is detected).
+ */
+MODULE_PARM_DESC(damageclips,
+ "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
+module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
+
+/**
* DOC: tmz (int)
* Trusted Memory Zone (TMZ) is a method to protect data being written
* to or read from memory.
@@ -2255,6 +2268,10 @@ retry_init:
if (ret)
goto err_pci;
+ ret = amdgpu_amdkfd_drm_client_create(adev);
+ if (ret)
+ goto err_pci;
+
/*
* 1. don't init fbdev on hw without DCE
* 2. don't init fbdev if there are no connectors
@@ -2472,6 +2489,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@@ -2486,6 +2504,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
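The two one-line additions to the pmops above are halves of one mechanism: suspend_complete is cleared when the suspend callback starts and set only once the noirq phase runs, so resume code (soc15_need_reset_on_resume() further down) can recognize a suspend that was aborted in between. A compressed, runnable model of that handshake, with hypothetical handler names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical condensed model of the suspend_complete handshake. */
struct dev_state { bool in_s3; bool suspend_complete; };

static void pm_suspend(struct dev_state *d)       { d->suspend_complete = false; d->in_s3 = true; }
static void pm_suspend_noirq(struct dev_state *d) { d->suspend_complete = true; }

static bool need_reset_on_resume(const struct dev_state *d)
{
	/* Aborted suspend: we entered S3 handling but never reached noirq. */
	return d->in_s3 && !d->suspend_complete;
}

int main(void)
{
	struct dev_state d = { false, false };

	pm_suspend(&d);                 /* suspend starts... */
	/* ...and is aborted before the noirq phase. */
	printf("reset on resume: %d\n", need_reset_on_resume(&d)); /* 1 */

	pm_suspend_noirq(&d);           /* a full suspend would get here */
	printf("reset on resume: %d\n", need_reset_on_resume(&d)); /* 0 */
	return 0;
}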
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 73b8cca35bab..c623e23049d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -121,6 +121,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
struct amdgpu_bo_param bp;
dma_addr_t dma_addr;
struct page *p;
+ unsigned long x;
int ret;
if (adev->gart.bo != NULL)
@@ -130,6 +131,10 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
if (!p)
return -ENOMEM;
+ /* assign pages to this device */
+ for (x = 0; x < (1UL << order); x++)
+ p[x].mapping = adev->mman.bdev.dev_mapping;
+
/* If the hardware does not support UTCL2 snooping of the CPU caches
* then set_memory_wc() could be used as a workaround to mark the pages
* as write combine memory.
@@ -223,6 +228,7 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
unsigned int order = get_order(adev->gart.table_size);
struct sg_table *sg = adev->gart.bo->tbo.sg;
struct page *p;
+ unsigned long x;
int ret;
ret = amdgpu_bo_reserve(adev->gart.bo, false);
@@ -234,6 +240,8 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
sg_free_table(sg);
kfree(sg);
p = virt_to_page(adev->gart.ptr);
+ for (x = 0; x < (1UL << order); x++)
+ p[x].mapping = NULL;
__free_pages(p, order);
adev->gart.ptr = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index b9674c57c436..6ddc8e3360e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -723,8 +723,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ /* If going to s2idle, no need to wait */
+ if (adev->in_s0ix) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GFX, true))
+ adev->gfx.gfx_off_state = true;
+ } else {
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
+ }
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 468a67b302d4..ca5c86e5f7cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
}
}
- if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
+ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
ret = -EFAULT;
err_free_shared_buf:
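The copy_to_user() fix above corrects the destination offset: the TA shared buffer was copied back to the start of the user buffer, clobbering whatever had already been written before copy_pos. A minimal model of the offset bookkeeping, using memcpy() in place of copy_to_user():

#include <stdio.h>
#include <string.h>

int main(void)
{
	char user_buf[16] = { 0 };
	const char header[] = "hdr:";
	const char shared[] = "data";
	size_t copy_pos = 0;

	/* First copy advances copy_pos... */
	memcpy(&user_buf[copy_pos], header, sizeof(header) - 1);
	copy_pos += sizeof(header) - 1;

	/* ...so the second copy must target &user_buf[copy_pos],
	 * not user_buf, or it would overwrite the header. */
	memcpy(&user_buf[copy_pos], shared, sizeof(shared) - 1);

	printf("%s\n", user_buf); /* "hdr:data" */
	return 0;
}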
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 45424ebf9681..5505d646f43a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -635,6 +635,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
ring->name);
ring->sched.ready = !r;
+
return r;
}
@@ -717,3 +718,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
+
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
+{
+ if (!ring)
+ return false;
+
+ if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bbb53720a018..fe1a61eb6e4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 08916538a615..8db880244324 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -221,8 +221,23 @@ static struct attribute *amdgpu_vram_mgr_attributes[] = {
NULL
};
+static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_mem_info_vram_vendor.attr &&
+ !adev->gmc.vram_vendor)
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vram_mgr_attr_group = {
- .attrs = amdgpu_vram_mgr_attributes
+ .attrs = amdgpu_vram_mgr_attributes,
+ .is_visible = amdgpu_vram_attrs_is_visible
};
/**
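amdgpu_vram_attrs_is_visible() above uses the standard sysfs idiom for conditionally hiding a single attribute without splitting the group: return 0 to drop the file, or attr->mode to keep it with its default permissions. A condensed model of that decision (kernel types replaced by local stand-ins):

#include <stdio.h>

typedef unsigned short umode_t;

struct attribute { const char *name; umode_t mode; };

static struct attribute attr_vendor = { "mem_info_vram_vendor", 0444 };
static struct attribute attr_total  = { "mem_info_vram_total",  0444 };

/* Hide the vendor attribute when the platform reports no vendor id. */
static umode_t is_visible_sketch(const struct attribute *attr,
				 unsigned int vram_vendor)
{
	if (attr == &attr_vendor && !vram_vendor)
		return 0;          /* attribute is not created at all */
	return attr->mode;         /* keep the default permissions */
}

int main(void)
{
	printf("vendor: %o\n", (unsigned)is_visible_sketch(&attr_vendor, 0)); /* 0 */
	printf("total:  %o\n", (unsigned)is_visible_sketch(&attr_total, 0));  /* 444 */
	return 0;
}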
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 6f7c031dd197..f24e34dc33d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(mmIH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(mmIH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}
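The same two-write sequence is added to every IH variant that follows: setting WPTR_OVERFLOW_CLEAR acknowledges the current overflow, but the bit must then be written back to 0, otherwise it keeps masking the condition and the next overflow goes undetected. A toy register model of why the second write matters (the latch behavior is an assumption made for illustration, not a datasheet description):

#include <stdio.h>

/* Toy IH register model: while CLEAR is held at 1 the OVERFLOW flag is
 * suppressed; it can only latch again after CLEAR is written back to 0. */
struct ih_regs { int clear; int overflow; };

static void hw_overflow_event(struct ih_regs *r)
{
	if (!r->clear)
		r->overflow = 1; /* only latches when CLEAR is deasserted */
}

int main(void)
{
	struct ih_regs r = { 0, 1 };   /* an overflow is pending */

	r.clear = 1; r.overflow = 0;   /* write 1: acknowledge + clear */

	hw_overflow_event(&r);         /* new overflow while CLEAR still set */
	printf("without unset: overflow=%d\n", r.overflow); /* 0: lost! */

	r.clear = 0;                   /* the added write: unset immediately */
	hw_overflow_event(&r);
	printf("with unset:    overflow=%d\n", r.overflow); /* 1: detected */
	return 0;
}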
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index b8c47e0cf37a..c19681492efa 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d63cab294883..dcdecb18b230 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4027,8 +4027,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = 0;
adev->gfx.mec2_fw = NULL;
}
- amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
- amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
gfx_v10_0_check_fw_write_wait(adev);
out:
@@ -6589,7 +6587,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 0ea0866c261f..4f3bfdc75b37 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -107,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};
-static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = {
- SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a),
- SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f)
-};
-
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
(SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -304,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
- case IP_VERSION(11, 5, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_5_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0));
- break;
default:
break;
}
@@ -3846,7 +3824,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69c500910746..3bc6943365a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
+ /* For now, limit this quirk to the gfx9 APU series; the gfx10/gfx11
+ * APUs are confirmed not to need this update.
+ */
+ if (adev->flags & AMD_IS_APU &&
+ adev->in_s3 && !adev->suspend_complete) {
+ DRM_INFO("Will skip the CSB packet resubmit\n");
+ return 0;
+ }
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 42e103d7077d..59d9215e5556 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -915,8 +915,8 @@ static int gmc_v6_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v6_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index efc16e580f1e..45a2f8e031a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1099,8 +1099,8 @@ static int gmc_v7_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v7_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ff4ae73d27ec..4422b27a3cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1219,8 +1219,8 @@ static int gmc_v8_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v8_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f9039d64ff2d..e67a62db9e12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1947,13 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
- static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
- u32 vram_info;
-
- if (!amdgpu_sriov_vf(adev)) {
- vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
- adev->gmc.vram_vendor = vram_info & 0xF;
- }
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
}
@@ -2340,8 +2333,8 @@ static int gmc_v9_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index aecad530b10a..2c02ae69883d 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index d9ed7332d805..ad4ad39f128f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -418,6 +418,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 8fb05eae340a..b8da0fc29378 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -418,6 +418,13 @@ static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index bc38b90f8cf8..88ea58d5c4ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
@@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
- .set = jpeg_v4_0_set_interrupt_state,
.process = jpeg_v4_0_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 6ede85b28cc8..78b74daf4eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
- amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
return 0;
}
@@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
- .set = jpeg_v4_0_5_set_interrupt_state,
.process = jpeg_v4_0_5_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index e64b33115848..de93614726c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index e90f33780803..b4723d68eab0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
u32 inst_mask;
int i;
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset =
+ SOC15_REG_OFFSET(
+ NBIO, 0,
+ regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+ << 2;
WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
0xff & ~(adev->gfx.xcc_mask));
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 9a24f17a5750..cada9f300a7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(IH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 15033efec2ba..1c614451dead 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
return AMD_RESET_METHOD_MODE1;
}
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ /* Reset on resume for the following suspend abort cases:
+ * 1) For now the reset is limited to APUs; dGPUs haven't been checked yet.
+ * 2) The S3 suspend was aborted and the TOS is already launched.
+ */
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+ !adev->suspend_complete &&
+ sol_reg)
+ return true;
+
+ return false;
+}
+
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
- (adev->apu_flags & AMD_APU_IS_RAVEN2))
+ /* On the latest Raven, the GPU reset is known to complete
+ * successfully, so temporarily enable it for the S3 suspend
+ * abort case.
+ */
+ if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
+ !soc15_need_reset_on_resume(adev))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -1302,6 +1325,10 @@ static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc15_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+ soc15_asic_reset(adev);
+ }
return soc15_common_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 48c6efcdeac9..4d7188912edf 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = {
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 917707bba7f3..450b6e831509 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 169ed400ee7b..8ab01ae919d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -2018,22 +2018,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
}
/**
- * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
- unsigned type, enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
-/**
* vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
*
* @adev: amdgpu_device pointer
@@ -2097,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
- .set = vcn_v4_0_set_interrupt_state,
.process = vcn_v4_0_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 2eda30e78f61..49e4c3c09aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle)
vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
-
- amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
}
return 0;
@@ -1669,22 +1667,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s
}
/**
- * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state
- *
- * @adev: amdgpu_device pointer
- * @source: interrupt sources
- * @type: interrupt types
- * @state: interrupt states
- *
- * Set VCN block interrupt state
- */
-static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
- unsigned type, enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
-/**
* vcn_v4_0_5_process_interrupt - process VCN block interrupt
*
* @adev: amdgpu_device pointer
@@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = {
- .set = vcn_v4_0_5_set_interrupt_state,
.process = vcn_v4_0_5_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index d364c6dd152c..bf68e18e3824 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index ddfc6941f9d5..db66e6cccaf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -421,6 +421,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index df75863393fc..d1caaf0e6a7c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_nv1x_hex[] = {
@@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9eef807, 0x876dff6d,
0x0000ffff, 0x87fe7e7e,
0x87ea6a6a, 0xb9faf802,
- 0xbe80226c, 0xbf810000,
+ 0xbe80226c, 0xbf9b0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_aldebaran_hex[] = {
@@ -2065,7 +2065,7 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
@@ -2500,7 +2500,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x876dff6d, 0x0000ffff,
0x87fe7e7e, 0x87ea6a6a,
0xb9faf802, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0xbf9b0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
};
@@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = {
0xb8eef802, 0xbf0d866e,
0xbfa20002, 0xb97af802,
0xbe80486c, 0xb97af802,
- 0xbe804a6c, 0xbfb00000,
+ 0xbe804a6c, 0xbfb10000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
@@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
0x86ea6a6a, 0x8f6e837a,
0xb96ee0c2, 0xbf800002,
0xb97a0002, 0xbf8a0000,
- 0xbe801f6c, 0xbf810000,
+ 0xbe801f6c, 0xbf9b0000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index e0140df0b0ec..71b3dc0c7363 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -1104,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV:
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
- s_endpgm
+ s_endpgm_saved
end
function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index e506411ad28a..bb26338204f4 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -921,7 +921,7 @@ L_RESTORE:
/* the END */
/**************************************************************************/
L_END_PGM:
- s_endpgm
+ s_endpgm_saved
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index ce4c52ec34d8..80e90fdef291 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
- amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+ err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+ if (err)
+ goto sync_memory_failed;
}
mutex_unlock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index f856901055d3..bdc01ca9609a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -574,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
- addr = prange->start << PAGE_SHIFT;
+ addr = migrate->start;
src = (uint64_t *)(scratch + npages);
dst = scratch;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 8b7fed913526..22cbfa1bdadd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 15277f1d5cf0..826bc4f6c8a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
if (has_wa_flag) {
- uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
- 0xffff : 0xffffffff;
+ uint32_t wa_mask =
+ (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;
m->compute_static_thread_mgmt_se0 = wa_mask;
m->compute_static_thread_mgmt_se1 = wa_mask;
@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 42d881809dc7..697b6d530d12 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
+ if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
+ if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
+ m->compute_resource_limits |=
+ COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ else
+ m->compute_resource_limits &=
+ ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ }
+
q->is_active = QUEUE_IS_ACTIVE(*q);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 17fbedbf3651..80320b8603fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -532,6 +532,7 @@ struct queue_properties {
enum mqd_update_flag {
UPDATE_FLAG_DBG_WA_ENABLE = 1,
UPDATE_FLAG_DBG_WA_DISABLE = 2,
+ UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};
struct mqd_update_info {
@@ -1488,10 +1489,15 @@ void kfd_dec_compute_active(struct kfd_node *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
-static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
+static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
- struct drm_device *ddev = adev_to_drm(kfd->adev);
+ struct drm_device *ddev;
+
+ if (node->xcp)
+ ddev = node->xcp->ddev;
+ else
+ ddev = adev_to_drm(node->adev);
return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 43eff221eae5..4858112f9a53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
+ struct mqd_update_info minfo = {0};
struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
@@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
}
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
+ minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q, NULL);
+ pqn->q, &minfo);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e5f7c92eebcb..6ed2ec381aaa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
- if (pcache->cache_level == 2)
- pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
- else if (mode)
- pcache->cache_size = pcache_info[cache_type].cache_size / mode;
- else
- pcache->cache_size = pcache_info[cache_type].cache_size;
+ pcache->cache_size = pcache_info[cache_type].cache_size;
+ /* Partition mode only affects L3 cache size */
+ if (mode && pcache->cache_level == 3)
+ pcache->cache_size /= mode;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
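
The fill_in_l2_l3_pcache() change above collapses a three-way branch into one rule: start from the table's base size and divide only the L3 entry by the partition mode. A standalone sketch of that rule, with hypothetical names (cache_size_kib is illustrative, not a kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Base reported size comes from the static cache table; only the L3
 * entry shrinks when the GPU is carved into memory partitions. */
static uint32_t cache_size_kib(uint32_t base_kib, int cache_level, int partition_mode)
{
	uint32_t size = base_kib;

	if (partition_mode && cache_level == 3)
		size /= partition_mode;
	return size;
}

int main(void)
{
	printf("L2: %u KiB\n", cache_size_kib(8192, 2, 4));   /* unaffected */
	printf("L3: %u KiB\n", cache_size_kib(262144, 3, 4)); /* split 4 ways */
	return 0;
}
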
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d4f525b66a09..5853cf022917 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -272,6 +272,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
u32 v_blank_start, v_blank_end, h_position, v_position;
struct amdgpu_crtc *acrtc = NULL;
+ struct dc *dc = adev->dm.dc;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@@ -284,6 +285,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
return 0;
}
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
/*
* TODO rework base driver to use values directly.
* for now parse it back into reg-format
@@ -1715,7 +1719,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
- init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
+ init_data.flags.disable_ips_in_vpb = 1;
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
@@ -1836,21 +1843,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- }
-
- /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
- * It is expected that DMUB will resend any pending notifications at this point, for
- * example HPD from DPIA.
- */
- if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point. Note
+ * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+ * align with the legacy interface initialization sequence. Connection status will be
+ * proactively detected once in amdgpu_dm_initialize_drm_device().
+ */
dc_enable_dmub_outbox(adev->dm.dc);
/* DPIA trace goes to dmesg logs only if outbox is enabled */
@@ -1949,7 +1947,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
- if (adev->dm.hpd_rx_offload_wq) {
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
if (adev->dm.hpd_rx_offload_wq[i].wq) {
destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
@@ -2280,6 +2278,7 @@ static int dm_sw_fini(void *handle)
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}
@@ -3529,6 +3528,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ }
+
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
@@ -3557,10 +3564,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
handle_hpd_rx_irq,
(void *) aconnector);
}
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
- aconnector;
}
}
@@ -4554,6 +4557,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
@@ -5212,6 +5219,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *new_plane_state,
struct drm_crtc_state *crtc_state,
struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
bool *dirty_regions_changed)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
@@ -5236,6 +5244,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
if (!dm_crtc_state->mpo_requested) {
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
goto ffu;
@@ -6187,7 +6199,9 @@ create_stream_for_sink(struct drm_connector *connector,
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode, scale);
@@ -6520,10 +6534,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6531,7 +6550,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -6572,12 +6591,18 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6585,7 +6610,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -8291,6 +8316,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
&dirty_rects_changed);
/*
@@ -8976,16 +9003,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
trace_amdgpu_dm_atomic_commit_tail_begin(state);
- if (dm->dc->caps.ips_support) {
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- if (new_con_state->crtc &&
- new_con_state->crtc->state->active &&
- drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
- dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
- break;
- }
- }
- }
+ if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dm->dc, false);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_dp_mst_atomic_wait_for_dependencies(state);
@@ -9188,6 +9207,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* To fix this, DC should permit updating only stream properties.
*/
dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ if (!dummy_updates) {
+ DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ continue;
+ }
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
@@ -10728,11 +10751,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
- if (ret) {
- DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
- goto fail;
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
}
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
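
Several of the amdgpu_dm.c hunks above share one idea: derive the DDC channel from the link's aux_mode once, then hand that channel to drm_get_edid(), instead of hard-coding the ddc_bus path. A minimal stand-alone sketch of the selection, using hypothetical edid_channel/pick_ddc names rather than the DRM types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the DP AUX and native I2C channels. */
struct edid_channel { const char *name; };

static struct edid_channel aux_ddc = { "dp-aux" };
static struct edid_channel i2c_ddc = { "native-i2c" };

/* Pick the transport from the link mode once; every EDID read then
 * goes through the channel the panel actually answers on. */
static const struct edid_channel *pick_ddc(bool aux_mode)
{
	return aux_mode ? &aux_ddc : &i2c_ddc;
}

int main(void)
{
	printf("reading EDID via %s\n", pick_ddc(true)->name);
	printf("reading EDID via %s\n", pick_ddc(false)->name);
	return 0;
}
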
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 85b7f58a7f35..c27063305a13 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+ case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
+ case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
+ apply_edid_quirks(edid_buf, edid_caps);
+
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
return result;
@@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
- apply_edid_quirks(edid_buf, edid_caps);
-
kfree(sads);
kfree(sadb);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 58b880acb087..3390f0d8420a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -711,7 +711,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
{
bool st;
enum dc_irq_source irq_source;
-
+ struct dc *dc = adev->dm.dc;
struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
if (!acrtc) {
@@ -729,6 +729,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
st = (state == AMDGPU_IRQ_STATE_ENABLE);
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
dc_interrupt_set(adev->dm.dc, irq_source, st);
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index f2dfa96f9ef5..39530b2ea495 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -94,7 +94,7 @@ static void calculate_bandwidth(
const uint32_t s_high = 7;
const uint32_t dmif_chunk_buff_margin = 1;
- uint32_t max_chunks_fbc_mode;
+ uint32_t max_chunks_fbc_mode = 0;
int32_t num_cursor_lines;
int32_t i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 960c4b4f6ddf..05f392501c0a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
} else if (revision.minor == 3) {
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
}
@@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v11->gpucapinfo);
/*
@@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_1->gpucapinfo);
/*
@@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
-
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_2->gpucapinfo);
/*
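
All four bios_parser2.c hunks fix the same ordering bug: the pointer returned by GET_IMAGE() fed a debug log before the NULL check ran. A condensed sketch of the corrected order, with hypothetical names standing in for the BIOS-table types:

#include <stddef.h>
#include <stdio.h>

struct smu_info { int ss_percentage; };

/* May legitimately return NULL when the BIOS table is absent. */
static struct smu_info *get_image(void) { return NULL; }

static int parse_firmware_info(void)
{
	struct smu_info *info = get_image();

	/* Validate first; only then is info safe to dereference. */
	if (!info)
		return -1;

	printf("ss_percentage: %d\n", info->ss_percentage);
	return 0;
}

int main(void)
{
	if (parse_firmware_info())
		printf("table missing, parse skipped safely\n");
	return 0;
}
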
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index a5489fe6875f..aa9fd1dc550a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
int i;
for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
+ if (i >= VG_NUM_DCFCLK_DPM_LEVELS)
+ break;
if (clock_table->SocVoltage[i] == voltage)
return clock_table->DcfClocks[i];
}
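
The vg_clk_mgr.c guard covers two parallel tables of different lengths: the loop is bounded by the voltage table, but the clock table read in the same iteration is shorter. A self-contained sketch of that bound, with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

#define NUM_VOLTAGE_LEVELS 8
#define NUM_DCFCLK_LEVELS  4	/* the parallel table is shorter */

static const uint32_t soc_voltage[NUM_VOLTAGE_LEVELS] = {
	700, 750, 800, 850, 900, 950, 1000, 1050
};
static const uint32_t dcf_clocks[NUM_DCFCLK_LEVELS] = { 400, 600, 800, 1000 };

/* Walk the voltage table, but never index past the end of the
 * shorter clock table that is read in the same iteration. */
static uint32_t find_dcfclk_for_voltage(uint32_t voltage)
{
	for (int i = 0; i < NUM_VOLTAGE_LEVELS; i++) {
		if (i >= NUM_DCFCLK_LEVELS)
			break;
		if (soc_voltage[i] == voltage)
			return dcf_clocks[i];
	}
	return 0;
}

int main(void)
{
	printf("dcfclk @ 800mV: %u\n", find_dcfclk_for_voltage(800));
	printf("dcfclk @ 1000mV: %u (past the clock table)\n",
	       find_dcfclk_for_voltage(1000));
	return 0;
}
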
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 9c660d1facc7..e64890259235 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -437,32 +437,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -474,32 +474,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -655,10 +655,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
+ uint32_t num_memps, num_fclk, num_dcfclk;
int i;
/* Determine min/max p-state values. */
- for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
+ num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
+ clock_table->NumMemPstatesEnabled;
+ for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
@@ -670,7 +673,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
min_dram_speed_mts = max_dram_speed_mts;
min_pstate = max_pstate;
- for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
+ for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
@@ -699,9 +702,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
- max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
+ num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
+ clock_table->NumFclkLevelsEnabled;
+ max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ clock_table->NumDcfClkLevelsEnabled;
+ for (i = 0; i < num_dcfclk; i++) {
int j;
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
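
The dcn35_clk_mgr.c hunks all clamp a level count reported by PMFW to the capacity of the driver-owned array before looping over it, since a misbehaving firmware table must not drive out-of-bounds reads. A sketch of the clamp under assumed names (max_dram_speed is illustrative, not the driver helper):

#include <stdint.h>
#include <stdio.h>

#define NUM_MEM_PSTATE_LEVELS 4

struct mem_pstate { uint32_t speed_mts; };

/* num_enabled comes from firmware and is untrusted; clamp it to the
 * size of the array we actually own before iterating. */
static uint32_t max_dram_speed(const struct mem_pstate *table, uint32_t num_enabled)
{
	uint32_t n = (num_enabled > NUM_MEM_PSTATE_LEVELS) ?
		     NUM_MEM_PSTATE_LEVELS : num_enabled;
	uint32_t max = 0;

	for (uint32_t i = 0; i < n; i++)
		if (table[i].speed_mts > max)
			max = table[i].speed_mts;
	return max;
}

int main(void)
{
	struct mem_pstate t[NUM_MEM_PSTATE_LEVELS] = { {3200}, {4800}, {5600}, {6400} };

	/* Firmware claims 16 levels; only 4 are real. */
	printf("max: %u MT/s\n", max_dram_speed(t, 16));
	return 0;
}
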
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index aa7c02ba948e..2c424e435962 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3817,7 +3817,9 @@ static void commit_planes_for_stream(struct dc *dc,
* programming has completed (we turn on phantom OTG in order
* to complete the plane disable for phantom pipes).
*/
- dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 88c6436b28b6..180ac47868c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -291,11 +291,14 @@ void dc_state_destruct(struct dc_state *state)
dc_stream_release(state->phantom_streams[i]);
state->phantom_streams[i] = NULL;
}
+ state->phantom_stream_count = 0;
for (i = 0; i < state->phantom_plane_count; i++) {
dc_plane_state_release(state->phantom_planes[i]);
state->phantom_planes[i] = NULL;
}
+ state->phantom_plane_count = 0;
+
state->stream_mask = 0;
memset(&state->res_ctx, 0, sizeof(state->res_ctx));
memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
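
dc_state_destruct() now zeroes phantom_stream_count and phantom_plane_count next to the loops that release the arrays, keeping the counters consistent with the freed slots. The same release-and-zero shape in miniature:

#include <stdlib.h>

#define MAX_PHANTOM 4

struct state {
	void *phantom_streams[MAX_PHANTOM];
	int phantom_stream_count;
};

/* Zero the count together with the slots, so a repeated destruct or
 * a later reader never walks stale entries. */
static void state_destruct(struct state *s)
{
	for (int i = 0; i < s->phantom_stream_count; i++) {
		free(s->phantom_streams[i]);
		s->phantom_streams[i] = NULL;
	}
	s->phantom_stream_count = 0;
}

int main(void)
{
	struct state s = { { malloc(8), malloc(8) }, 2 };

	state_destruct(&s);
	state_destruct(&s);	/* safe: count already reset to 0 */
	return 0;
}
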
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5d7aa882416b..c9317ea0258e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -434,6 +434,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_in_vpb;
};
enum visual_confirm {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2b79a0e5638e..363d522603a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -125,7 +125,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
struct dmub_srv *dmub;
enum dmub_status status;
int i;
@@ -133,6 +133,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
+ dc_ctx = dc_dmub_srv->ctx;
dmub = dc_dmub_srv->dmub;
for (i = 0 ; i < count; i++) {
@@ -1161,7 +1162,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
enum dmub_status status;
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
@@ -1170,6 +1171,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
return true;
+ dc_ctx = dc_dmub_srv->ctx;
+
if (wait) {
if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
do {
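
Both dc_dmub_srv.c hunks split a declaration from its initializer so the `!dc_dmub_srv` guard executes before any dereference. A condensed sketch of the hazard and the fix, with toy struct names:

#include <stddef.h>
#include <stdio.h>

struct ctx { int id; };
struct srv { struct ctx *ctx; };

static int queue_execute(struct srv *s)
{
	struct ctx *c;		/* declaration only: s not touched yet */

	if (!s || !s->ctx)	/* the guard now really runs first */
		return -1;

	c = s->ctx;		/* safe: s proven non-NULL above */
	printf("ctx id %d\n", c->id);
	return 0;
}

int main(void)
{
	struct ctx c = { 42 };
	struct srv s = { &c };

	queue_execute(NULL);	/* rejected cleanly instead of faulting */
	return queue_execute(&s);
}
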
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index b08ccb8c68bc..9900dda2eef5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1034,6 +1034,7 @@ enum replay_FW_Message_type {
Replay_Msg_Not_Support = -1,
Replay_Set_Timing_Sync_Supported,
Replay_Set_Residency_Frameupdate_Timer,
+ Replay_Set_Pseudo_VTotal,
};
union replay_error_status {
@@ -1089,6 +1090,10 @@ struct replay_settings {
uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
enum replay_link_off_frame_count_level link_off_frame_count_level;
+ /* Replay pseudo vtotal for ABM + IPS on full-screen video, which can improve IPS residency */
+ uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay last pseudo vtotal set to DMUB */
+ uint16_t last_pseudo_vtotal;
};
/* To split out "global" and "per-panel" config settings.
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e8570060d007..5bca67407c5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -290,4 +290,5 @@ void dce_panel_cntl_construct(
dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
dce_panel_cntl->base.ctx = init_data->ctx;
dce_panel_cntl->base.inst = init_data->inst;
+ dce_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index e43f77c11c00..5f97a868ada3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -56,16 +56,13 @@ static void dpp3_enable_cm_block(
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
- enum dc_lut_mode mode;
+ enum dc_lut_mode mode = LUT_BYPASS;
uint32_t state_mode;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
- if (state_mode == 0)
- mode = LUT_BYPASS;
-
if (state_mode == 2) {//Programmable RAM LUT
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
if (lut_mode == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
index ad0df1a72a90..9e96a3ace207 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
@@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct(
dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs;
dcn301_panel_cntl->base.ctx = init_data->ctx;
dcn301_panel_cntl->base.inst = init_data->inst;
+ dcn301_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 03248422d6ff..281be20b1a10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
+ uint8_t pwrseq_inst = 0xF;
+
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
- dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
+
+ switch (init_data->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+ ASSERT(false);
+ break;
+ }
+
+ dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index 501388014855..d761b0df2878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -203,12 +203,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.transmitter = init_data->transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index da94e5309fba..81e349d5835b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,8 +184,6 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -240,6 +238,8 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 6042a5a6a44f..59ade76ffb18 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 9f37f717a1f8..a0a65e099104 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1112,7 +1112,7 @@ struct pipe_slice_table {
struct pipe_ctx *pri_pipe;
struct dc_plane_state *plane;
int slice_count;
- } mpc_combines[MAX_SURFACES];
+ } mpc_combines[MAX_PLANES];
int mpc_combine_count;
};
@@ -1288,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex
return updated;
}
-static bool should_allow_odm_power_optimization(struct dc *dc,
+static bool should_apply_odm_power_optimization(struct dc *dc,
struct dc_state *context, struct vba_vars_st *v, int *split,
bool *merge)
{
@@ -1392,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate(
{
int i;
unsigned int new_vlevel;
+ unsigned int cur_policy[MAX_PIPES];
- for (i = 0; i < pipe_cnt; i++)
+ for (i = 0; i < pipe_cnt; i++) {
+ cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -1403,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate(
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
+ } else {
+ for (i = 0; i < pipe_cnt; i++)
+ pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
}
}
@@ -1580,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
}
- if (should_allow_odm_power_optimization(dc, context, vba, split, merge))
+ if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
try_odm_power_optimization_and_revalidate(
dc, context, pipes, split, merge, vlevel, *pipe_cnt);
@@ -2209,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int i;
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
- dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
+ if (!dc->config.enable_windowed_mpo_odm)
+ dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
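
try_odm_power_optimization_and_revalidate() now snapshots each pipe's odm_combine_policy before forcing the 2:1 policy and restores the snapshot when revalidation fails, so a rejected experiment leaves the pipes untouched. The general save/try/restore shape, sketched with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

static int policy[MAX_PIPES];

static bool revalidate(void) { return false; /* pretend DML rejected it */ }

static void try_optimization(int pipe_cnt)
{
	int saved[MAX_PIPES];

	/* Snapshot current state, then apply the speculative change. */
	for (int i = 0; i < pipe_cnt; i++) {
		saved[i] = policy[i];
		policy[i] = 2;		/* force the 2:1 combine policy */
	}

	if (!revalidate()) {
		/* Rejected: roll every pipe back to its snapshot. */
		for (int i = 0; i < pipe_cnt; i++)
			policy[i] = saved[i];
	}
}

int main(void)
{
	try_optimization(MAX_PIPES);
	printf("policy[0] after failed try: %d\n", policy[0]);
	return 0;
}
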
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 475c4ec43c01..7ea2bd5374d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -164,8 +164,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.sr_exit_z8_time_us = 210.0,
.sr_enter_plus_exit_z8_time_us = 320.0,
.fclk_change_latency_us = 24.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 64d01a9cd68c..1ba6933d2b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -341,9 +341,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}
- if (dml2->config.bbox_overrides.clks_table.num_states)
- p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
-
/* Override from passed values, if available */
for (i = 0; i < p->in_states->num_states; i++) {
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
@@ -400,7 +397,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
-
+ p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
}
@@ -439,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ // Override last out_state with data from last in_state
+ // This will ensure that out_state contains max fclk
+ memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
+ &p->in_states->state_array[p->in_states->num_states - 1],
+ sizeof(struct soc_state_bounding_box_st));
+ }
}
void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out)
@@ -793,35 +798,28 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-/*TODO no support for mpc combine, need rework - should calculate scaling params based on plane+stream*/
-static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, const struct dc_state *context)
+static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context)
{
int i;
- struct scaler_data data = { 0 };
+ struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
+
+ memset(temp_pipe, 0, sizeof(struct pipe_ctx));
for (i = 0; i < MAX_PIPES; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state == in && !pipe->prev_odm_pipe) {
- const struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
-
- data = context->res_ctx.pipe_ctx[i].plane_res.scl_data;
- while (next_pipe) {
- data.h_active += next_pipe->plane_res.scl_data.h_active;
- data.recout.width += next_pipe->plane_res.scl_data.recout.width;
- if (in->rotation == ROTATION_ANGLE_0 || in->rotation == ROTATION_ANGLE_180) {
- data.viewport.width += next_pipe->plane_res.scl_data.viewport.width;
- } else {
- data.viewport.height += next_pipe->plane_res.scl_data.viewport.height;
- }
- next_pipe = next_pipe->next_odm_pipe;
- }
+ temp_pipe->stream = pipe->stream;
+ temp_pipe->plane_state = pipe->plane_state;
+ temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
+
+ resource_build_scaling_params(temp_pipe);
break;
}
}
ASSERT(i < MAX_PIPES);
- return data;
+ return temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
@@ -866,7 +864,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->ScalerEnabled[location] = false;
}
-static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, const struct dc_state *context)
+static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_plane_state *in, struct dc_state *context)
{
const struct scaler_data scaler_data = get_scaler_data_for_plane(in, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 26307e599614..2a58a7687bdb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
}
for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
+ if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
+ dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
+ __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ break;
+ }
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 5660f15da291..01493c49bd7a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1183,9 +1183,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
if (dccg) {
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
@@ -1476,7 +1476,7 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
-static enum dc_status apply_single_controller_ctx_to_hw(
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -2302,7 +2302,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
continue;
- status = apply_single_controller_ctx_to_hw(
+ status = dce110_apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
dc);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 08028a1779ae..ed3cc3648e8e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index e931342fcf4c..931ac8ed7069 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -2561,7 +2561,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
-static void dcn20_reset_back_end_for_pipe(
+void dcn20_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -2790,18 +2790,17 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
}
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
- dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
-
- phyd32clk = get_phyd32clk_src(link);
- dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
-
dto_params.otg_inst = tg->inst;
dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
} else {
if (dccg->funcs->enable_symclk_se)
dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index b94c85340abf..d950b3e54ec2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing(
void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 8e88dcaf88f5..7252f5f781f0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+ uint32_t otg_inst;
+
+ if (!abm || !tg || !panel_cntl)
+ return;
+
+ otg_inst = tg->inst;
if (dmcu) {
dce110_set_pipe(pipe_ctx);
return;
}
- if (abm && panel_cntl) {
- if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm,
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm,
otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- } else {
- dmub_abm_set_pipe(abm, otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- }
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
}
@@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
{
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ uint32_t otg_inst;
+
+ if (!abm || !tg || !panel_cntl)
+ return false;
+
+ otg_inst = tg->inst;
if (dc->dc->res_pool->dmcu) {
dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
return true;
}
- if (abm != NULL) {
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
-
- if (abm && panel_cntl) {
- if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm,
- otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- } else {
- dmub_abm_set_pipe(abm,
- otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- }
- }
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
+ } else {
+ dmub_abm_set_pipe(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
- if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
+ if (abm->funcs && abm->funcs->set_backlight_level_pwm)
abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
frame_ramp, 0, panel_cntl->inst);
else
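
The dcn21_hwseq.c rewrite swaps nested `if (abm && panel_cntl)` blocks for early-return guards, so every dereference below the guards is unconditionally safe and the main path stays flat. A small sketch of the guard-clause shape:

#include <stddef.h>
#include <stdio.h>

struct dev { int inst; };

static int set_pipe(struct dev *abm, struct dev *tg, struct dev *panel)
{
	/* Guard clauses: bail out once, up front ... */
	if (!abm || !tg || !panel)
		return -1;

	/* ... so the body needs no further NULL tests or nesting. */
	printf("abm %d, otg %d, panel %d\n", abm->inst, tg->inst, panel->inst);
	return 0;
}

int main(void)
{
	struct dev a = {0}, t = {1}, p = {2};

	set_pipe(&a, &t, &p);
	return set_pipe(NULL, &t, &p) == -1 ? 0 : 1;
}
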
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 6c9299c7683d..aa36d7a56ca8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) ||
+ (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ if (hws->funcs.reset_back_end_for_pipe)
+ hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
+
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
{
unsigned int i;
+ enum dc_status status = DC_OK;
+ struct dce_hwseq *hws = dc->hwseq;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream &&
+ pipe_ctx->stream->link->link_state_valid) {
+ continue;
}
+
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
+ continue;
+
+ if (hws->funcs.apply_single_controller_ctx_to_hw)
+ status = hws->funcs.apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ ASSERT(status == DC_OK);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (hws->funcs.resync_fifo_dccg_dio)
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
}
}
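
dcn32_disable_phantom_streams() dispatches each backend step through optional hwseq function pointers, skipping any hook a given DCN generation leaves NULL. A minimal sketch of that optional-ops-table pattern (names are illustrative):

#include <stdio.h>

struct hwseq_funcs {
	/* Optional hooks: NULL means this generation skips the step. */
	void (*reset_back_end)(int pipe);
	void (*enable_stream_gating)(int pipe);
};

static void reset20(int pipe) { printf("reset back end, pipe %d\n", pipe); }

static void disable_phantom(const struct hwseq_funcs *f, int pipe)
{
	/* Each step runs only where the ops table provides it. */
	if (f->reset_back_end)
		f->reset_back_end(pipe);
	if (f->enable_stream_gating)
		f->enable_stream_gating(pipe);
}

int main(void)
{
	struct hwseq_funcs f = { .reset_back_end = reset20 };

	disable_phantom(&f, 0);
	return 0;
}
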
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index cecf7f0f5671..069e20bc87c0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
+
void dcn32_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 427cfc8c24a4..e8ac94a005b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.enable_phantom_streams = dcn32_enable_phantom_streams,
+ .disable_phantom_streams = dcn32_disable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
.resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+ .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 9c806385ecbd..8b6c49622f3b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -680,7 +680,7 @@ void dcn35_power_down_on_boot(struct dc *dc)
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ int i, edp_num;
if (dc->debug.dmcub_emulation)
return true;
@@ -688,6 +688,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 0 || edp_num > 1)
return false;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+ return false;
+ }
}
// TODO: review other cases when idle optimization is allowed
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index a54399383318..64ca7c66509b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -379,6 +379,7 @@ struct hw_sequencer_funcs {
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+ void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 6137cf09aa54..b3c62a82cb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -165,8 +165,15 @@ struct hwseq_private_funcs {
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
struct dc_state *context);
+ enum dc_status (*apply_single_controller_ctx_to_hw)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
#endif
+ void (*reset_back_end_for_pipe)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f74ae0d41d3c..3a6bf77a6873 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -469,6 +469,8 @@ struct resource_context {
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
bool is_mpc_3dlut_acquired[MAX_PIPES];
+ /* solely used for building scaler data in dml2 */
+ struct pipe_ctx temp_pipe;
};
struct dce_bw_output {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index 5dcbaa2db964..e97d964a1791 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -57,7 +57,7 @@ struct panel_cntl_funcs {
struct panel_cntl_init_data {
struct dc_context *ctx;
uint32_t inst;
- uint32_t pwrseq_inst;
+ uint32_t eng_id;
};
struct panel_cntl {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c958ef37b78a..77a60aa9f27b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe);
int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe);
/*
- * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
- * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for a plane with MPCC combine. otherwise
- * the number of MPC "cuts" for the plane.
+ * Get the number of MPC slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an MPC combine
+ * pipe topology.
*/
-int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head);
+int resource_get_mpc_slice_count(const struct pipe_ctx *pipe);
/*
- * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
- * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for ODM combine. otherwise
- * the number of ODM "cuts" for the timing.
+ * Get the number of ODM slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an ODM combine
+ * pipe topology.
*/
-int resource_get_odm_slice_count(const struct pipe_ctx *otg_master);
+int resource_get_odm_slice_count(const struct pipe_ctx *pipe);
/* Get the ODM slice index counting from 0 from left most slice */
int resource_get_odm_slice_index(const struct pipe_ctx *opp_head);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 37d3027c32dc..cf22b8f28ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
-{
- uint8_t pwrseq_inst = 0xF;
- struct dc_context *dc_ctx = link->dc->ctx;
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- switch (link->eng_id) {
- case ENGINE_ID_DIGA:
- pwrseq_inst = 0;
- break;
- case ENGINE_ID_DIGB:
- pwrseq_inst = 1;
- break;
- default:
- DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
- ASSERT(false);
- break;
- }
-
- return pwrseq_inst;
-}
-
-
static void link_destruct(struct dc_link *link)
{
int i;
@@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link,
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
- panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
+ panel_cntl_init_data.eng_id = link->eng_id;
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 8fe66c367850..5b0bc7f6a188 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -361,7 +361,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
int num_dpias = 0;
- for (uint8_t i = 0; i < num_streams; ++i) {
+ for (unsigned int i = 0; i < num_streams; ++i) {
if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
/* new dpia sst stream, check whether it exceeds max dpia */
if (num_dpias >= MAX_DPIA_NUM)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index dd0d2b206462..5491b707cec8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
struct dc_link *link_dpia_primary, *link_dpia_secondary;
int total_bw = 0;
- for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
+ for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) {
if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
continue;
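The declarations shown above (link_dpia_primary, link_dpia_secondary) suggest the loop body also dereferences dc->links[i + 1] to pair each primary DPIA with its secondary, so the old bound of MAX_PIPES * 2 could read one element past the array. A minimal sketch of the pairwise walk this new bound protects, with illustrative names:

	/* Sketch only: with n entries, any access to links[i + 1] needs
	 * i < n - 1, hence the (MAX_PIPES * 2) - 1 bound above.
	 */
	static int total_bw_over_adjacent_pairs(struct dc_link *links[], int n)
	{
		int total_bw = 0;
		int i;

		for (i = 0; i < n - 1; ++i) {
			struct dc_link *primary = links[i];
			struct dc_link *secondary = links[i + 1];

			if (!primary || !secondary)
				continue;
			/* ... accumulate the per-pair tunnel bandwidth ... */
		}

		return total_bw;
	}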
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 5a0b04518956..16a62e018712 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
union lane_status lane_status;
+ union lane_align_status_updated dpcd_lane_status_updated;
uint8_t dpcd_buf[6] = {0};
uint32_t lane;
@@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status(
* check lanes status
*/
lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
+ dpcd_lane_status_updated.raw = dpcd_buf[4];
if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
!lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
+ !lane_status.bits.SYMBOL_LOCKED_0 ||
+ !dp_is_interlane_aligned(dpcd_lane_status_updated)) {
/* if one of the channel equalization, clock
* recovery or symbol lock is dropped
* consider it as (link has been
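In this status read, dpcd_buf[4] holds DP_LANE_ALIGN_STATUS_UPDATED (DPCD 0x204), so the hunk extends the per-lane CR/EQ/symbol-lock test with the link-wide interlane-align bit: a link whose lanes individually pass but which has lost interlane alignment is now also treated as link loss. A plausible shape for the helper, assuming the usual dc union layout (the exact field name is an assumption):

	/* Hedged sketch: DPCD 0x204 bit 0 is INTERLANE_ALIGN_DONE. */
	static inline bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
	{
		return align_status.bits.INTERLANE_ALIGN_DONE == 1;
	}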
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index e8dda44b23cb..5d36bab0029c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
uint32_t retries_eq = 0;
enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
- uint32_t wait_time_microsec;
+ uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index 5c9a30211c10..fc50931c2aec 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd(
uint32_t extended_size;
/* size of the remaining partitioned address space */
uint32_t size_left_to_read;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
/* size of the next partition to be read from */
uint32_t partition_size;
uint32_t data_index = 0;
@@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd(
{
uint32_t partition_size;
uint32_t data_index = 0;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
while (size) {
partition_size = dpcd_get_next_partition_size(address, size);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 511ff6b5b985..7538b548c572 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt) {
+ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index c4d71e7f18af..6f10052caeef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context(
dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (dc->config.enable_windowed_mpo_odm &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ switch (resource_get_odm_slice_count(pipe)) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ break;
+ case 4:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
+ } else {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 761ec9891875..5fdcda8f8602 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -780,8 +780,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400,
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
.static_screen_wait_frames = 2,
};
@@ -2130,6 +2130,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c64b6c848ef7..e699731ee68e 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -2832,6 +2832,7 @@ struct dmub_rb_cmd_psr_set_power_opt {
#define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_MODE_IPS 0x10
#define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
# define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
@@ -2894,6 +2895,10 @@ enum dmub_cmd_replay_type {
* Set Residency Frameupdate Timer.
*/
DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER = 6,
+ /**
+ * Set pseudo vtotal
+ */
+ DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
};
/**
@@ -3077,6 +3082,26 @@ struct dmub_cmd_replay_set_timing_sync_data {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_cmd_replay_set_pseudo_vtotal {
+ /**
+ * Panel Instance.
+ * Panel isntance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Source vtotal used for Replay + IPS + ABM full screen video
+ */
+ uint16_t vtotal;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad;
+};
+
+/**
* Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
*/
struct dmub_rb_cmd_replay_set_power_opt {
@@ -3157,6 +3182,20 @@ struct dmub_rb_cmd_replay_set_timing_sync {
};
/**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_pseudo_vtotal {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal data;
+};
+
+/**
* Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_cmd_replay_frameupdate_timer_data {
@@ -3207,6 +3246,10 @@ union dmub_replay_cmd_set {
* Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data.
*/
struct dmub_cmd_replay_frameupdate_timer_data timer_data;
+ /**
+ * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
+ */
+ struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
};
/**
@@ -4358,6 +4401,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
*/
struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+ struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
};
/**
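Taken together, these hunks add a complete command path: a payload struct, a ring-buffer wrapper, and entries in both the replay command-set union and dmub_rb_cmd. A hedged sketch of how a caller might fill the new command; the header field names and the send helper follow the pattern of the neighboring replay commands and are assumptions, not confirmed by this diff:

	/* Illustrative only; dc_wake_and_execute_dmub_cmd() and the header
	 * sub-type layout are assumed from the other replay commands.
	 */
	static void send_replay_pseudo_vtotal(struct dc_context *ctx,
					      uint8_t panel_inst, uint16_t vtotal)
	{
		union dmub_rb_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.replay_set_pseudo_vtotal.header.type = DMUB_CMD__REPLAY;
		cmd.replay_set_pseudo_vtotal.header.sub_type =
			DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL;
		cmd.replay_set_pseudo_vtotal.header.payload_bytes =
			sizeof(cmd.replay_set_pseudo_vtotal.data);
		cmd.replay_set_pseudo_vtotal.data.panel_inst = panel_inst;
		cmd.replay_set_pseudo_vtotal.data.vtotal = vtotal;

		dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}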
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index ad98e504c00d..e304e8435fb8 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -980,6 +980,11 @@ void set_replay_coasting_vtotal(struct dc_link *link,
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+{
+ link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+}
+
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal)
{
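The new setter only caches the value in link->replay_settings; a caller still has to push it to firmware through the DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command added in the dmub_cmd.h hunk above. A minimal usage sketch under that assumption (the panel-instance lookup and the send helper are illustrative):

	/* Cache the full-screen-video source vtotal on the link, then hand
	 * it to the firmware path sketched earlier.
	 */
	set_replay_ips_full_screen_video_src_vtotal(link, vtotal);
	send_replay_pseudo_vtotal(link->ctx, link->panel_cntl->inst, vtotal);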
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index c17bbc6fb38c..bef4815e1703 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -57,6 +57,7 @@ void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint16_t vtotal);
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 1dc5dd9b7bf7..df2c7ffe190f 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -258,6 +258,7 @@ enum DC_DEBUG_MASK {
DC_ENABLE_DML2 = 0x100,
DC_DISABLE_PSR_SU = 0x200,
DC_DISABLE_REPLAY = 0x400,
+ DC_DISABLE_IPS = 0x800,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/amdgpu_reg_state.h b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
index be519c8edf49..335980e2afbf 100644
--- a/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
+++ b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h
@@ -138,7 +138,7 @@ static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size,
}
#define amdgpu_asic_get_reg_state_supported(adev) \
- ((adev)->asic_funcs->get_reg_state ? 1 : 0)
+ (((adev)->asic_funcs && (adev)->asic_funcs->get_reg_state) ? 1 : 0)
#define amdgpu_asic_get_reg_state(adev, state, buf, size) \
((adev)->asic_funcs->get_reg_state ? \
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index df4f20293c16..eb4da3666e05 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = si_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ ret = si_set_temperature_range(adev);
+ if (ret)
+ return ret;
+#if 0 //TODO ?
+ si_dpm_powergate_uvd(adev, true);
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index c16703868e5c..0ad947df777a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
+#include <linux/power_supply.h>
#include <linux/reboot.h>
#include "amdgpu.h"
@@ -733,7 +734,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- smu->smu_baco.state = SMU_BACO_STATE_NONE;
+ smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -817,16 +818,8 @@ static int smu_late_init(void *handle)
* handle the switch automatically. Driver involvement
* is unnecessary.
*/
- if (!smu->dc_controlled_by_gpio) {
- ret = smu_set_power_source(smu,
- adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
- SMU_POWER_SOURCE_DC);
- if (ret) {
- dev_err(adev->dev, "Failed to switch to %s mode!\n",
- adev->pm.ac_power ? "AC" : "DC");
- return ret;
- }
- }
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
+ smu_set_ac_dc(smu);
if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
(amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
@@ -1961,31 +1954,10 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
return 0;
}
-static int smu_reset_mp1_state(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
-
- if ((!adev->in_runpm) && (!adev->in_suspend) &&
- (!amdgpu_in_reset(adev)))
- switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
- case IP_VERSION(13, 0, 0):
- case IP_VERSION(13, 0, 7):
- case IP_VERSION(13, 0, 10):
- ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
- int ret;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -2003,15 +1975,7 @@ static int smu_hw_fini(void *handle)
adev->pm.dpm_enabled = false;
- ret = smu_smc_hw_cleanup(smu);
- if (ret)
- return ret;
-
- ret = smu_reset_mp1_state(smu);
- if (ret)
- return ret;
-
- return 0;
+ return smu_smc_hw_cleanup(smu);
}
static void smu_late_fini(void *handle)
@@ -2710,6 +2674,7 @@ int smu_get_power_limit(void *handle,
case SMU_PPT_LIMIT_CURRENT:
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 2aa4fea87314..66e84defd0b6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -424,7 +424,6 @@ enum smu_reset_mode {
enum smu_baco_state {
SMU_BACO_STATE_ENTER = 0,
SMU_BACO_STATE_EXIT,
- SMU_BACO_STATE_NONE,
};
struct smu_baco_context {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 4cd43bbec910..bcad42534da4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
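The reshuffle only changes the overdrive-disabled case: od_percent_lower used to be forced to 100, which made the derived minimum limit collapse to zero, and it is now always taken from the powerplay table. The code around this hunk typically derives the limits roughly as below; treat this as a sketch of the arithmetic, not the literal driver lines:

	/* With power_limit = 300 (W), od_percent_upper = 10, od_percent_lower = 30:
	 *   max = 300 * (100 + 10) / 100 = 330
	 *   min = 300 * (100 - 30) / 100 = 210
	 * Under the old od-disabled values (upper = 0, lower = 100) the
	 * minimum degenerated to 300 * (100 - 100) / 100 = 0.
	 */
	uint32_t max_power_limit = power_limit * (100 + od_percent_upper) / 100;
	uint32_t min_power_limit = power_limit * (100 - od_percent_lower) / 100;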
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 8d1d29ffb0f1..ed189a3878eb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 21fc033528fa..e2ad2b972ab0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 5a314d0316c1..c7bfa68bf00f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1442,10 +1442,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
schedule_work(&smu->interrupt_work);
+ adev->pm.ac_power = true;
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
schedule_work(&smu->interrupt_work);
+ adev->pm.ac_power = false;
break;
case 0x7:
/*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 771a3d457c33..c486182ff275 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1379,10 +1379,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
+ adev->pm.ac_power = true;
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
smu_v13_0_ack_ac_dc_interrupt(smu);
+ adev->pm.ac_power = false;
break;
case 0x7:
/*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a9b25faa63e4..9b80f18ea6c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2357,6 +2357,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2368,19 +2369,18 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}
@@ -2747,13 +2747,7 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_PrepareMp1ForUnload,
- 0x55, NULL);
-
- if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
- ret = smu_v13_0_disable_pmfw_state(smu);
-
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */
@@ -2949,7 +2943,7 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- switch (adev->ip_versions[MP1_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
return smu->smc_fw_version >= 0x004e6300;
case IP_VERSION(13, 0, 10):
@@ -2959,6 +2953,55 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
}
}
+static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3013,7 +3056,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_0_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_0_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
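The new set_power_limit handler splits requests at msg_limit, the largest value the PMFW message interface accepts. At or below it, any stale overdrive offset is cleared and the limit is sent directly; above it (and only when overdrive is enabled), the firmware limit is pegged at msg_limit and the excess is expressed as a percentage in the overdrive table. Working through the percentage math from the code above:

	/* Ppt is the percentage above the stock limit. For msg_limit = 300
	 * and a requested limit of 330:
	 *   Ppt = (330 * 100) / 300 - 100 = 110 - 100 = 10
	 * i.e. a +10% overdrive offset on top of the pegged msg_limit.
	 */
	uint32_t msg_limit = 300, limit = 330;
	uint32_t ppt = (limit * 100) / msg_limit - 100;	/* 10 */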
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 3c98a8a0386a..7e1941cf1796 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -160,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1),
MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1),
- MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
- MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0),
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 59606a19e3d2..3dc7b60cb075 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2321,6 +2321,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?
@@ -2332,19 +2333,18 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
*max_power_limit /= 100;
}
@@ -2504,13 +2504,7 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
switch (mp1_state) {
case PP_MP1_STATE_UNLOAD:
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_PrepareMp1ForUnload,
- 0x55, NULL);
-
- if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
- ret = smu_v13_0_disable_pmfw_state(smu);
-
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
break;
default:
/* Ignore others */
@@ -2545,6 +2539,55 @@ static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
return smu->smc_fw_version > 0x00524600;
}
+static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ SkuTable_t *skutable = &pptable->SkuTable;
+ uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTableExternal_t *od_table =
+ (OverDriveTableExternal_t *)table_context->overdrive_table;
+ int ret = 0;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
+ if (limit <= msg_limit) {
+ if (smu->current_power_limit > msg_limit) {
+ od_table->OverDriveTable.Ppt = 0;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+ }
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
+ } else if (smu->od_enabled) {
+ ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+ if (ret)
+ return ret;
+
+ od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+ ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+ return ret;
+ }
+
+ smu->current_power_limit = limit;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2596,7 +2639,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
.get_power_limit = smu_v13_0_7_get_power_limit,
- .set_power_limit = smu_v13_0_set_power_limit,
+ .set_power_limit = smu_v13_0_7_set_power_limit,
.set_power_source = smu_v13_0_set_power_source,
.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index ef31033439bc..29d91493b101 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1762,6 +1762,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
u8 request = msg->request & ~DP_AUX_I2C_MOT;
int ret = 0;
+ mutex_lock(&ctx->aux_lock);
pm_runtime_get_sync(dev);
msg->reply = 0;
switch (request) {
@@ -1778,6 +1779,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
msg->size, msg->buffer);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&ctx->aux_lock);
return ret;
}
@@ -2474,7 +2476,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
ctx->connector = NULL;
anx7625_dp_stop(ctx);
- pm_runtime_put_sync(dev);
+ mutex_lock(&ctx->aux_lock);
+ pm_runtime_put_sync_suspend(dev);
+ mutex_unlock(&ctx->aux_lock);
}
static enum drm_connector_status
@@ -2668,6 +2672,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
mutex_init(&platform->lock);
mutex_init(&platform->hdcp_wq_lock);
+ mutex_init(&platform->aux_lock);
INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 66ebee7f3d83..39ed35d33836 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -475,6 +475,8 @@ struct anx7625_data {
struct workqueue_struct *hdcp_workqueue;
/* Lock for hdcp work queue */
struct mutex hdcp_wq_lock;
+ /* Lock for aux transfer and disable */
+ struct mutex aux_lock;
char edid_block;
struct display_timing dt;
u8 display_timing_valid;
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index bb55f697a181..6886db2d9e00 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev)
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
kfree(adev);
}
-static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+static void drm_aux_hpd_bridge_free_adev(void *_adev)
{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
+ auxiliary_device_uninit(_adev);
}
/**
- * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * devm_drm_dp_hpd_bridge_alloc - allocate a HPD DisplayPort bridge
* @parent: device instance providing this bridge
* @np: device node pointer corresponding to this bridge instance
*
@@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
* DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
* able to send the HPD events.
*
- * Return: device instance that will handle created bridge or an error code
- * encoded into the pointer.
+ * Return: bridge auxiliary device pointer or an error pointer
*/
-struct device *drm_dp_hpd_bridge_register(struct device *parent,
- struct device_node *np)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
{
struct auxiliary_device *adev;
int ret;
@@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
kfree(adev);
return ERR_PTR(ret);
}
- ret = auxiliary_device_add(adev);
- if (ret) {
- auxiliary_device_uninit(adev);
+ ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
+ if (ret)
return ERR_PTR(ret);
- }
- ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+ return adev;
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
+
+static void drm_aux_hpd_bridge_del_adev(void *_adev)
+{
+ auxiliary_device_delete(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_add - register a HPD DisplayPort bridge
+ * @dev: struct device to tie registration lifetime to
+ * @adev: bridge auxiliary device to be registered
+ *
+ * Returns: zero on success or a negative errno
+ */
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+ int ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
+
+/**
+ * drm_dp_hpd_bridge_register - allocate and register a HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Return: device instance that will handle created bridge or an error pointer
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
+ if (IS_ERR(adev))
+ return ERR_CAST(adev);
+
+ ret = devm_drm_dp_hpd_bridge_add(parent, adev);
if (ret)
return ERR_PTR(ret);
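Splitting registration into alloc + add lets a caller attach driver data to the auxiliary device between the two steps, something the one-shot drm_dp_hpd_bridge_register() (now a thin wrapper, as shown) cannot offer. A usage sketch under that assumption:

	/* Two-step registration: the dev_set_drvdata() placement is the
	 * point of the split; cleanup is devres-managed by the helpers.
	 */
	static int probe_with_hpd_bridge(struct device *parent,
					 struct device_node *np, void *priv)
	{
		struct auxiliary_device *adev;

		adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
		if (IS_ERR(adev))
			return PTR_ERR(adev);

		dev_set_drvdata(&adev->dev, priv);	/* set before the device goes live */

		return devm_drm_dp_hpd_bridge_add(parent, adev);
	}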
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 541e4f5afc4c..14d4dcf239da 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -107,6 +107,7 @@ struct ps8640 {
struct device_link *link;
bool pre_enabled;
bool need_post_hpd_delay;
+ struct mutex aux_lock;
};
static const struct regmap_config ps8640_regmap_config[] = {
@@ -345,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
int ret;
+ mutex_lock(&ps_bridge->aux_lock);
pm_runtime_get_sync(dev);
+ ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
+ if (ret) {
+ pm_runtime_put_sync_suspend(dev);
+ goto exit;
+ }
ret = ps8640_aux_transfer_msg(aux, msg);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+exit:
+ mutex_unlock(&ps_bridge->aux_lock);
+
return ret;
}
@@ -470,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
ps_bridge->pre_enabled = false;
ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+
+ /*
+ * The bridge seems to expect everything to be power cycled during the
+ * disable process, so grab a lock here to make sure
+ * ps8640_aux_transfer() is not holding a runtime PM reference and
+ * preventing the bridge from suspending.
+ */
+ mutex_lock(&ps_bridge->aux_lock);
+
pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
+
+ mutex_unlock(&ps_bridge->aux_lock);
}
static int ps8640_bridge_attach(struct drm_bridge *bridge,
@@ -619,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps_bridge)
return -ENOMEM;
+ mutex_init(&ps_bridge->aux_lock);
+
ps_bridge->supplies[0].supply = "vdd12";
ps_bridge->supplies[1].supply = "vdd33";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index be5914caa17d..63a1a0c88be4 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -969,10 +969,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
reg &= ~DSIM_STOP_STATE_CNT_MASK;
reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
-
- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
- reg |= DSIM_FORCE_STOP_STATE;
-
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
@@ -1431,18 +1427,6 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
disable_irq(dsi->irq);
}
-static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
-{
- u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
-
- if (enable)
- reg |= DSIM_FORCE_STOP_STATE;
- else
- reg &= ~DSIM_FORCE_STOP_STATE;
-
- samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
-}
-
static int samsung_dsim_init(struct samsung_dsim *dsi)
{
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1492,9 +1476,6 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
ret = samsung_dsim_init(dsi);
if (ret)
return;
-
- samsung_dsim_set_display_mode(dsi);
- samsung_dsim_set_display_enable(dsi, true);
}
}
@@ -1503,12 +1484,8 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
- samsung_dsim_set_display_mode(dsi);
- samsung_dsim_set_display_enable(dsi, true);
- } else {
- samsung_dsim_set_stop_state(dsi, false);
- }
+ samsung_dsim_set_display_mode(dsi);
+ samsung_dsim_set_display_enable(dsi, true);
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1521,9 +1498,6 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
- samsung_dsim_set_stop_state(dsi, true);
-
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1828,8 +1802,6 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
if (ret)
return ret;
- samsung_dsim_set_stop_state(dsi, false);
-
ret = mipi_dsi_create_packet(&xfer.packet, msg);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 2bdc5b439beb..4560ae9cbce1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x)
return ret;
}
+ ret = sii902x_audio_codec_init(sii902x, dev);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(sii902x->i2c, sii902x);
+
+ sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
+ 1, 0, I2C_MUX_GATE,
+ sii902x_i2c_bypass_select,
+ sii902x_i2c_bypass_deselect);
+ if (!sii902x->i2cmux) {
+ ret = -ENOMEM;
+ goto err_unreg_audio;
+ }
+
+ sii902x->i2cmux->priv = sii902x;
+ ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+ if (ret)
+ goto err_unreg_audio;
+
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x)
drm_bridge_add(&sii902x->bridge);
- sii902x_audio_codec_init(sii902x, dev);
-
- i2c_set_clientdata(sii902x->i2c, sii902x);
+ return 0;
- sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
- 1, 0, I2C_MUX_GATE,
- sii902x_i2c_bypass_select,
- sii902x_i2c_bypass_deselect);
- if (!sii902x->i2cmux)
- return -ENOMEM;
+err_unreg_audio:
+ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+ platform_device_unregister(sii902x->audio.pdev);
- sii902x->i2cmux->priv = sii902x;
- return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+ return ret;
}
static int sii902x_probe(struct i2c_client *client)
@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client)
}
static void sii902x_remove(struct i2c_client *client)
-
{
struct sii902x *sii902x = i2c_get_clientdata(client);
- i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
+ i2c_mux_del_adapters(sii902x->i2cmux);
+
+ if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+ platform_device_unregister(sii902x->audio.pdev);
}
static const struct of_device_id sii902x_dt_ids[] = {
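Both the error path and remove() guard the unregister with !PTR_ERR_OR_ZERO(), which skips the call only when codec init left an error pointer behind. Spelling the idiom out:

	/* PTR_ERR_OR_ZERO() maps error pointers to their errno and anything
	 * else to 0:
	 *   PTR_ERR_OR_ZERO(ERR_PTR(-ENODEV)) == -ENODEV -> guard false, skip
	 *   PTR_ERR_OR_ZERO(valid_ptr)        == 0       -> guard true, unregister
	 *   PTR_ERR_OR_ZERO(NULL)             == 0       -> guard true as well,
	 *     so the pattern relies on audio.pdev holding either a valid
	 *     pointer or an ERR_PTR, never NULL, at these points.
	 */
	if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
		platform_device_unregister(sii902x->audio.pdev);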
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index bd6c24d4213c..f7c6b60629c2 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -5491,6 +5491,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
* - 0 if the new state is valid
* - %-ENOSPC, if the new state is invalid, because of BW limitation
* @failing_port is set to:
+ *
* - The non-root port where a BW limit check failed
* with all the ports downstream of @failing_port passing
* the BW limit check.
@@ -5499,6 +5500,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
* - %NULL if the BW limit check failed at the root port
* with all the ports downstream of the root port passing
* the BW limit check.
+ *
* - %-EINVAL, if the new state is invalid, because the root port has
* too many payloads.
*/
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index f57e6d74fb0e..5ebdd6f8f36e 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
+ u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
+ if (block_start < start || block_end > end) {
+ u64 adjusted_start = max(block_start, start);
+ u64 adjusted_end = min(block_end, end);
+
+ if (round_down(adjusted_end + 1, req_size) <=
+ round_up(adjusted_start, req_size))
+ continue;
+ }
+
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
@@ -538,7 +548,13 @@ static int __alloc_range(struct drm_buddy *mm,
list_add(&block->left->tmp_link, dfs);
} while (1);
+ if (total_allocated < size) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
list_splice_tail(&allocated, blocks);
+
return 0;
err_undo:
@@ -755,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return -EINVAL;
/* Actual range allocation */
- if (start + size == end)
+ if (start + size == end) {
+ if (!IS_ALIGNED(start | end, min_block_size))
+ return -EINVAL;
+
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+ }
original_size = size;
original_min_size = min_block_size;
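The alloc_range_bias() hunk prunes blocks that merely straddle the [start, end) window: after clipping to the window, the block can only satisfy the request if a naturally aligned req_size chunk still fits inside the clipped span. Worked numbers for the test:

	/* req_size = chunk_size << order, e.g. 0x1000 << 2 = 0x4000 (16 KiB).
	 * Clipped block: adjusted_start = 0x1000, adjusted_end = 0x4fff.
	 *   round_up(0x1000, 0x4000)       = 0x4000
	 *   round_down(0x4fff + 1, 0x4000) = 0x4000
	 * round_down() <= round_up() -> no aligned 16 KiB chunk fits, so the
	 * block is skipped instead of being split to no effect.
	 */
	u64 req_size = 0x4000;
	u64 adjusted_start = 0x1000, adjusted_end = 0x4fff;
	bool fits = round_down(adjusted_end + 1, req_size) >
		    round_up(adjusted_start, req_size);	/* false here */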
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index cb90e70d85e8..65f9f66933bb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -904,6 +904,7 @@ out:
connector_set = NULL;
fb = NULL;
mode = NULL;
+ num_connectors = 0;
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 834a5e28abbe..7352bde299d5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -820,7 +820,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
if (max_segment == 0)
max_segment = UINT_MAX;
err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
+ (unsigned long)nr_pages << PAGE_SHIFT,
max_segment, GFP_KERNEL);
if (err) {
kfree(sg);
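nr_pages is an unsigned int here, so nr_pages << PAGE_SHIFT is computed in 32 bits and wraps once the buffer reaches 4 GiB; the cast widens the operand before the shift. A demonstration of the wrap, assuming 4 KiB pages:

	/* With PAGE_SHIFT = 12 and nr_pages = 1 << 20 (4 GiB of pages):
	 *   nr_pages << PAGE_SHIFT                -> 32-bit shift wraps to 0
	 *   (unsigned long)nr_pages << PAGE_SHIFT -> 0x100000000 on 64-bit
	 */
	unsigned int nr_pages = 1u << 20;
	unsigned long bad  = nr_pages << 12;			/* 0 */
	unsigned long good = (unsigned long)nr_pages << 12;	/* 4 GiB */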
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 84101baeecc6..a6c19de46292 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1040,7 +1040,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@@ -1109,7 +1110,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fall through and try a 0 timeout wait!
*/
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
@@ -1416,10 +1418,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
/* This happens inside the syncobj lock */
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence)
+ return;
+
ret = dma_fence_chain_find_seqno(&fence, entry->point);
- if (ret != 0 || !fence) {
+ if (ret != 0) {
+ /* The given seqno has not been submitted yet. */
dma_fence_put(fence);
return;
+ } else if (!fence) {
+ /* If dma_fence_chain_find_seqno returns 0 but sets the fence
+ * to NULL, it implies that the given seqno is signaled and a
+ * later seqno has already been submitted. Assign a stub fence
+ * so that the eventfd still gets signaled below.
+ */
+ fence = dma_fence_get_stub();
}
list_del_init(&entry->node);
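The rewrite separates the three outcomes of dma_fence_chain_find_seqno() that the old combined test conflated; summarized as a sketch:

	/* After: ret = dma_fence_chain_find_seqno(&fence, point);
	 *
	 *   ret != 0           -> the point has not been submitted to the
	 *                         chain yet; drop the reference and wait.
	 *   ret == 0 && fence  -> an unsignaled fence for the point; the
	 *                         eventfd fires when it signals.
	 *   ret == 0 && !fence -> the point is already signaled and the
	 *                         chain has advanced past it; substitute
	 *                         dma_fence_get_stub() so the eventfd still
	 *                         fires immediately.
	 */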
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 776f2f0b602d..0ef7bc8848b0 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -319,9 +319,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
- struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
struct exynos_drm_plane_state *state =
- to_exynos_plane_state(plane.base.state);
+ to_exynos_plane_state(plane->base.state);
unsigned int alpha = state->base.alpha;
unsigned int pixel_alpha;
unsigned long val;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a9f1c5c05894..f2145227a1e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -480,7 +480,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
struct fimd_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
const struct fimd_driver_data *driver_data = ctx->driver_data;
- void *timing_base = ctx->regs + driver_data->timing_base;
+ void __iomem *timing_base = ctx->regs + driver_data->timing_base;
u32 val;
if (ctx->suspended)
@@ -661,9 +661,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
struct drm_framebuffer *fb, int width)
{
- struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane *plane = &ctx->planes[win];
struct exynos_drm_plane_state *state =
- to_exynos_plane_state(plane.base.state);
+ to_exynos_plane_state(plane->base.state);
uint32_t pixel_format = fb->format->format;
unsigned int alpha = state->base.alpha;
u32 val = WINCONx_ENWIN;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index e9a769590415..180507a47700 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1341,7 +1341,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
for (i = 0; i < ctx->num_clocks; i++) {
ret = clk_prepare_enable(ctx->clocks[i]);
if (ret) {
- while (--i > 0)
+ while (--i >= 0)
clk_disable_unprepare(ctx->clocks[i]);
return ret;
}
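With --i > 0 the unwind stopped before index 0, so the first clock stayed enabled after a mid-loop failure; --i >= 0 walks every previously enabled clock back down. The general pattern as a standalone sketch (the loop index must be signed for the termination test to work):

	/* Enable clks[0..n-1]; on failure at clks[i], disable clks[i-1..0]. */
	static int enable_all_clocks(struct clk *clks[], int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = clk_prepare_enable(clks[i]);
			if (ret) {
				while (--i >= 0)
					clk_disable_unprepare(clks[i]);
				return ret;
			}
		}

		return 0;
	}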
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index b5d6e3352071..3089029abba4 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT
Note that this driver only supports newer devices from Broadwell on.
For further information and setup guide, you can visit:
- http://01.org/igvt-g.
+ https://github.com/intel/gvt-linux/wiki.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e777686190ca..c13f14edb508 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
# The following turn off the warnings enabled by -Wextra
ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index ac456a2275db..eda4a8b88590 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1155,6 +1155,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1255,8 +1256,6 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
-
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f5ef95da5534..ae647d03af25 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2355,6 +2355,9 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_rate = intel_dp_common_rate(intel_dp, 0);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
+ /* FIXME 128b/132b SST support missing */
+ limits->max_rate = min(limits->max_rate, 810000);
+
limits->min_lane_count = 1;
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 8f702c3fc62d..57bbf3e3af92 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -1525,8 +1525,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* can rely on frontbuffer tracking.
*/
mask = EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP;
+ EDP_PSR_DEBUG_MASK_HPD;
+
+ /*
+ * For some unknown reason on HSW non-ULT (or at least on
+ * Dell Latitude E6540) external displays start to flicker
+ * when PSR is enabled on the eDP. SR/PC6 residency is much
+ * higher than should be possible with an external display.
+ * As a workaround leave LPSP unmasked to prevent PSR entry
+ * when external displays are active.
+ */
+ if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ mask |= EDP_PSR_DEBUG_MASK_LPSP;
if (DISPLAY_VER(dev_priv) < 20)
mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index acc6b6804105..2915d7afe5cc 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1209,7 +1209,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_tv_format format;
u32 format_map;
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
@@ -2298,7 +2298,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
* Read the list of supported input resolutions for the selected TV
* format.
*/
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memcpy(&tv_res, &format_map,
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
@@ -2363,7 +2363,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
int i;
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
- if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+ if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
*val = i;
return 0;
@@ -2419,7 +2419,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
- state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+ state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
if (state->crtc) {
struct drm_crtc_state *crtc_state =
@@ -3076,7 +3076,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
drm_property_add_enum(intel_sdvo_connector->tv_format, i,
tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
- intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+ intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d4386cb3569e..992a725de751 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
- int format = conn_state->tv.mode;
+ int format = conn_state->tv.legacy_mode;
return &tv_modes[format];
}
@@ -1704,7 +1704,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
break;
}
- connector->state->tv.mode = i;
+ connector->state->tv.legacy_mode = i;
}
static int
@@ -1859,7 +1859,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
old_state = drm_atomic_get_old_connector_state(state, connector);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
- if (old_state->tv.mode != new_state->tv.mode ||
+ if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
@@ -1896,7 +1896,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
conn_state->tv.margins.right = 46;
conn_state->tv.margins.bottom = 37;
- conn_state->tv.mode = 0;
+ conn_state->tv.legacy_mode = 0;
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@@ -1910,7 +1910,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
drm_object_attach_property(&connector->base,
i915->drm.mode_config.legacy_tv_mode_property,
- conn_state->tv.mode);
+ conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
conn_state->tv.margins.left);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index 64f440fdc22b..8b21dc8e26d5 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -51,8 +51,8 @@
#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
#define _DSCA_PPS_0 0x6B200
#define _DSCC_PPS_0 0x6BA00
-#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + (pps) * 4)
-#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + (pps) * 4)
+#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
+#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f6c1ece57d..efcb00472be2 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
for (i = start; i < end; i += 4) {
p = intel_gvt_find_mmio_info(gvt, i);
if (p) {
- WARN(1, "dup mmio definition offset %x\n",
- info->offset);
+ WARN(1, "dup mmio definition offset %x\n", i);
/* We return -EEXIST here to make GVT-g load fail.
* So duplicated MMIO can be found as soon as
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index e98b6d69a91a..9b6d87c8b583 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -41,7 +41,7 @@
* To virtualize GPU resources GVT-g driver depends on hypervisor technology
* e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
* and be virtualized within GVT-g device module. More architectural design
- * doc is available on https://01.org/group/2230/documentation-list.
+ * doc is available on https://github.com/intel/gvt-linux/wiki.
*/
static LIST_HEAD(intel_gvt_devices);
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 3f73b211fa8e..3407450435e2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
- drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 3f93c70488ca..311b91630fbe 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_DSI]) {
meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
drm_bridge_remove(&meson_encoder_dsi->bridge);
- drm_bridge_remove(meson_encoder_dsi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 25ea76558690..c4686568c9ca 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
- drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c0bc924cd302..c9c55e2ea584 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1287,7 +1287,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
+ gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.min_acc_len = 1;
gpu->ubwc_config.ubwc_mode = 1;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 83380bc92a00..6a4b489d44e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -144,10 +144,6 @@ enum dpu_enc_rc_states {
* to track crtc in the disable() hook which is called
* _after_ encoder_mask is cleared.
* @connector: If a mode is set, cached pointer to the active connector
- * @crtc_kickoff_cb: Callback into CRTC that will flush & start
- * all CTL paths
- * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
- * @debugfs_root: Debug file system root file node
* @enc_lock: Lock around physical encoder
* create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
@@ -2072,7 +2068,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
/* reset the merge 3D HW block */
- if (phys_enc->hw_pp->merge_3d) {
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
@@ -2103,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_wb)
intf_cfg.wb = phys_enc->hw_wb->idx;
- if (phys_enc->hw_pp->merge_3d)
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
if (ctl->ops.reset_intf_cfg)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index b58a9c2ae326..724537ab776d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -29,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @topology: selected topology for the display
- * @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
struct msm_display_topology topology;
@@ -204,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
* _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
* @rm: dpu resource manager handle
* @primary_idx: index of primary mixer in rm->mixer_blks[]
+ *
+ * Returns: lm peer mixer id on success or %-EINVAL on error
*/
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 77a8d9366ed7..fb588fde298a 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
tbd = dp_link_get_test_bits_depth(ctrl->link,
ctrl->panel->dp_mode.bpp);
- if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
- pr_debug("BIT_DEPTH not set. Configure default\n");
- tbd = DP_TEST_BIT_DEPTH_8;
- }
-
config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
/* Num of Lanes */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index d37d599aec27..4c72124ffb5d 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -329,10 +329,26 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ struct drm_connector *connector;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ connector = dp->dp_display.connector;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- struct drm_bridge *bridge = dp->dp_display.bridge;
+ if ((hpd && dp->dp_display.link_ready) ||
+ (!hpd && !dp->dp_display.link_ready)) {
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+ (hpd ? "on" : "off"));
+ return 0;
+ }
/* reset video pattern flag on disconnect */
if (!hpd) {
@@ -348,7 +364,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
- drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
+ dp_display_send_hpd_event(&dp->dp_display);
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 98427d45e9a7..49dfac1fd1ef 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -7,6 +7,7 @@
#include <drm/drm_print.h>
+#include "dp_reg.h"
#include "dp_link.h"
#include "dp_panel.h"
@@ -1082,7 +1083,7 @@ int dp_link_process_request(struct dp_link *dp_link)
int dp_link_get_colorimetry_config(struct dp_link *dp_link)
{
- u32 cc;
+ u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
struct dp_link_private *link;
if (!dp_link) {
@@ -1096,10 +1097,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
* Unless a video pattern CTS test is ongoing, use RGB_VESA
* Only RGB_VESA and RGB_CEA supported for now
*/
- if (dp_link_is_video_pattern_requested(link))
- cc = link->dp_link.test_video.test_dyn_range;
- else
- cc = DP_TEST_DYNAMIC_RANGE_VESA;
+ if (dp_link_is_video_pattern_requested(link)) {
+ if (link->dp_link.test_video.test_dyn_range &
+ DP_TEST_DYNAMIC_RANGE_CEA)
+ cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
+ }
return cc;
}
@@ -1179,6 +1181,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
{
u32 tbd;
+ struct dp_link_private *link;
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
/*
* Few simplistic rules and assumptions made here:
@@ -1196,12 +1201,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
tbd = DP_TEST_BIT_DEPTH_10;
break;
default:
- tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+ drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
+ bpp);
+ tbd = DP_TEST_BIT_DEPTH_8;
break;
}
- if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
- tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
return tbd;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index ea85a691e72b..78785ed4b40c 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -143,6 +143,9 @@
#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0)
+#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04)
+
#define REG_DP_VALID_BOUNDARY (0x00000030)
#define REG_DP_VALID_BOUNDARY_2 (0x00000034)
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 5f68e31a3e4e..0915f3b68752 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
void *vaddr;
- vaddr = msm_gem_get_vaddr(obj);
+ vaddr = msm_gem_get_vaddr_locked(obj);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
iosys_map_set_vaddr(map, vaddr);
@@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- msm_gem_put_vaddr(obj);
+ msm_gem_put_vaddr_locked(obj);
}
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 095390774f22..655002b21b0d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
- pm_runtime_get_sync(&gpu->pdev->dev);
+ WARN_ON(!mutex_is_locked(&gpu->lock));
- mutex_lock(&gpu->lock);
+ pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
+ submit->seqno = submit->hw_fence->seqno;
+
update_sw_cntrs(gpu);
/*
@@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
- hangcheck_timer_reset(gpu);
-
- mutex_unlock(&gpu->lock);
-
pm_runtime_put(&gpu->pdev->dev);
+ hangcheck_timer_reset(gpu);
}
/*
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 5cc8d358cc97..d5512037c38b 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
+ const struct iommu_flush_ops *tlb;
+ struct device *iommu_dev;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
@@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
static void msm_iommu_tlb_flush_all(void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
{
}
-static const struct iommu_flush_ops null_tlb_ops = {
+static const struct iommu_flush_ops tlb_ops = {
.tlb_flush_all = msm_iommu_tlb_flush_all,
.tlb_flush_walk = msm_iommu_tlb_flush_walk,
.tlb_add_page = msm_iommu_tlb_add_page,
@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* The incoming cfg will have the TTBR1 quirk enabled */
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
- ttbr0_cfg.tlb = &null_tlb_ops;
+ ttbr0_cfg.tlb = &tlb_ops;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
- &ttbr0_cfg, iommu->domain);
+ &ttbr0_cfg, pagetable);
if (!pagetable->pgtbl_ops) {
kfree(pagetable);
@@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
+ pagetable->tlb = ttbr1_cfg->tlb;
+ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
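
These TLB callbacks use a common runtime-PM guard: touch the hardware only if it is already powered up, since a suspended SMMU holds no stale translations worth invalidating. A reduced sketch of the pattern, assuming only the standard runtime-PM API (`do_flush` is a hypothetical callback):

    #include <linux/pm_runtime.h>

    static void flush_if_powered(struct device *dev,
                                 void (*do_flush)(struct device *))
    {
            /* bail out unless a reference could be taken on an already-active device */
            if (!pm_runtime_get_if_in_use(dev))
                    return;

            do_flush(dev);
            pm_runtime_put_autosuspend(dev);
    }
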
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 455b2e3a0cdd..35423d10aafa 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -562,6 +562,7 @@ static const struct msm_mdss_data sdm670_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sdm845_data = {
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 4bc13f7d005a..9d6655f96f0c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_fence_init(submit->hw_fence, fctx);
- submit->seqno = submit->hw_fence->seqno;
-
mutex_lock(&priv->lru.lock);
for (i = 0; i < submit->nr_bos; i++) {
@@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
mutex_unlock(&priv->lru.lock);
+ /* TODO move submit path over to using a per-ring lock.. */
+ mutex_lock(&gpu->lock);
+
msm_gpu_submit(gpu, submit);
+ mutex_unlock(&gpu->lock);
+
return dma_fence_get(submit->hw_fence);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1e6aaf95ff7c..ceef470c9fbf 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM
help
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
+
+config DRM_NOUVEAU_GSP_DEFAULT
+ bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to use the GSP codepaths by default on
+ Turing and Ampere GPUs.
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index d1437c08645f..6f5d376d8fcc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -9,7 +9,7 @@
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
struct nvkm_gsp_mem {
- u32 size;
+ size_t size;
void *data;
dma_addr_t addr;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a04156ca8390..80f74ee0fc78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -128,12 +128,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
struct nouveau_abi16_ntfy *ntfy, *temp;
/* Cancel all jobs from the entity's queue. */
- drm_sched_entity_fini(&chan->sched.entity);
+ if (chan->sched)
+ drm_sched_entity_fini(&chan->sched->entity);
if (chan->chan)
nouveau_channel_idle(chan->chan);
- nouveau_sched_fini(&chan->sched);
+ if (chan->sched)
+ nouveau_sched_destroy(&chan->sched);
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
@@ -197,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
+ struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -261,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
break;
}
+ case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ break;
+ case NOUVEAU_GETPARAM_VRAM_USED: {
+ struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+ getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
+ break;
+ }
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
@@ -337,10 +348,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq,
- chan->chan->dma.ib_max);
- if (ret)
- goto done;
+ /* If we're not using the VM_BIND uAPI, we don't need a scheduler.
+ *
+ * The client lock is already acquired by nouveau_abi16_get().
+ */
+ if (nouveau_cli_uvmm(cli)) {
+ ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
+ chan->chan->dma.ib_max);
+ if (ret)
+ goto done;
+ }
init->channel = chan->chan->chid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 1f5e243c0c75..11c8c4a80079 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
- struct nouveau_sched sched;
+ struct nouveau_sched *sched;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6f6c31a9937b..a947e1d5f309 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -201,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
- nouveau_sched_fini(&cli->sched);
+ if (cli->sched)
+ nouveau_sched_destroy(&cli->sched);
if (uvmm)
nouveau_uvmm_fini(uvmm);
nouveau_vmm_fini(&cli->svm);
@@ -311,7 +312,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->mem = &mems[ret];
/* Don't pass in the (shared) sched_wq in order to let
- * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
+ * nouveau_sched_create() create a dedicated one for VM_BIND jobs.
*
* This is required to ensure that for VM_BIND jobs free_job() work and
* run_job() work can always run concurrently and hence, free_job() work
@@ -320,7 +321,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
* locks which indirectly or directly are held for allocations
* elsewhere.
*/
- ret = nouveau_sched_init(&cli->sched, drm, NULL, 1);
+ ret = nouveau_sched_create(&cli->sched, drm, NULL, 1);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8a6d94c8b163..e239c6bf4afa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -98,7 +98,7 @@ struct nouveau_cli {
bool disabled;
} uvmm;
- struct nouveau_sched sched;
+ struct nouveau_sched *sched;
const struct nvif_mclass *mem;
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index bc5d71b79ab2..e65c0ef23bc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -389,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (ret)
goto out;
- args.sched = &chan16->sched;
+ args.sched = chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 5057d976fa57..93f08f9479d8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- if (atomic_dec_and_test(&fctx->notify_ref))
+ if (!--fctx->notify_ref)
drop = 1;
}
@@ -103,7 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
- cancel_work_sync(&fctx->allow_block_work);
+ cancel_work_sync(&fctx->uevent_work);
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
@@ -146,12 +146,13 @@ nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fc
return drop;
}
-static int
-nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
+static void
+nouveau_fence_uevent_work(struct work_struct *work)
{
- struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
+ struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
+ uevent_work);
unsigned long flags;
- int ret = NVIF_EVENT_KEEP;
+ int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
if (!list_empty(&fctx->pending)) {
@@ -161,23 +162,20 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
if (nouveau_fence_update(chan, fctx))
- ret = NVIF_EVENT_DROP;
+ drop = 1;
}
- spin_unlock_irqrestore(&fctx->lock, flags);
+ if (drop)
+ nvif_event_block(&fctx->event);
- return ret;
+ spin_unlock_irqrestore(&fctx->lock, flags);
}
-static void
-nouveau_fence_work_allow_block(struct work_struct *work)
+static int
+nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
{
- struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
- allow_block_work);
-
- if (atomic_read(&fctx->notify_ref) == 0)
- nvif_event_block(&fctx->event);
- else
- nvif_event_allow(&fctx->event);
+ struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);
+ schedule_work(&fctx->uevent_work);
+ return NVIF_EVENT_KEEP;
}
void
@@ -191,7 +189,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
} args;
int ret;
- INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
+ INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
@@ -535,19 +533,15 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
- bool do_work;
- if (atomic_inc_return(&fctx->notify_ref) == 0)
- do_work = true;
+ if (!fctx->notify_ref++)
+ nvif_event_allow(&fctx->event);
ret = nouveau_fence_no_signaling(f);
if (ret)
set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
- else if (atomic_dec_and_test(&fctx->notify_ref))
- do_work = true;
-
- if (do_work)
- schedule_work(&fctx->allow_block_work);
+ else if (!--fctx->notify_ref)
+ nvif_event_block(&fctx->event);
return ret;
}
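
The shape of this rework is the standard defer-to-workqueue pattern: the uevent handler may run in a context that cannot sleep, so it only schedules a work item, and the work function makes the potentially sleeping nvif_event_block()/nvif_event_allow() calls. A minimal sketch of that split (illustrative; all names are placeholders):

    #include <linux/workqueue.h>

    static void my_event_work(struct work_struct *work)
    {
            /* process context: sleeping calls are safe here */
    }

    static DECLARE_WORK(my_work, my_event_work);

    /* called from atomic/interrupt context: must not sleep */
    static void my_event_handler(void)
    {
            schedule_work(&my_work);
    }
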
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 28f5cf013b89..8bc065acfe35 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -3,7 +3,6 @@
#define __NOUVEAU_FENCE_H__
#include <linux/dma-fence.h>
-#include <linux/workqueue.h>
#include <nvif/event.h>
struct nouveau_drm;
@@ -45,10 +44,9 @@ struct nouveau_fence_chan {
u32 context;
char name[32];
+ struct work_struct uevent_work;
struct nvif_event event;
- struct work_struct allow_block_work;
- atomic_t notify_ref;
- int dead, killed;
+ int notify_ref, dead, killed;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index dd98f6910f9c..32fa2e273965 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -398,7 +398,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = {
.free_job = nouveau_sched_free_job,
};
-int
+static int
nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
struct workqueue_struct *wq, u32 credit_limit)
{
@@ -453,7 +453,30 @@ fail_wq:
return ret;
}
-void
+int
+nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit)
+{
+ struct nouveau_sched *sched;
+ int ret;
+
+ sched = kzalloc(sizeof(*sched), GFP_KERNEL);
+ if (!sched)
+ return -ENOMEM;
+
+ ret = nouveau_sched_init(sched, drm, wq, credit_limit);
+ if (ret) {
+ kfree(sched);
+ return ret;
+ }
+
+ *psched = sched;
+
+ return 0;
+}
+
+static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
@@ -471,3 +494,14 @@ nouveau_sched_fini(struct nouveau_sched *sched)
if (sched->wq)
destroy_workqueue(sched->wq);
}
+
+void
+nouveau_sched_destroy(struct nouveau_sched **psched)
+{
+ struct nouveau_sched *sched = *psched;
+
+ nouveau_sched_fini(sched);
+ kfree(sched);
+
+ *psched = NULL;
+}
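
With nouveau_sched now heap-allocated, optional users keep a `struct nouveau_sched *` that stays NULL until a scheduler is actually needed, and creation/destruction pair through a double pointer so teardown also clears the caller's reference. Sketch of the calling convention introduced above (`need_sched` is a hypothetical condition):

    struct nouveau_sched *sched = NULL;
    int ret;

    if (need_sched) {
            ret = nouveau_sched_create(&sched, drm, NULL, 1);
            if (ret)
                    return ret;
    }

    /* ... use sched ... */

    if (sched)
            nouveau_sched_destroy(&sched);  /* frees and sets sched to NULL */
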
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index a6528f5981e6..e1f01a23e6f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -111,8 +111,8 @@ struct nouveau_sched {
} job;
};
-int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
- struct workqueue_struct *wq, u32 credit_limit);
-void nouveau_sched_fini(struct nouveau_sched *sched);
+int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit);
+void nouveau_sched_destroy(struct nouveau_sched **psched);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index cc03e0c22ff3..5e4565c5011a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -1011,7 +1011,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
if (ret)
return ret;
- buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
+ buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
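
The one-line nouveau_svm fix swaps two arguments back into their defined order: like kcalloc(), kvcalloc() takes the element count first and the element size second, which is what its multiplication-overflow check assumes. A minimal sketch (hypothetical helper; the declaring header is linux/slab.h or linux/mm.h depending on kernel version):

    #include <linux/slab.h>

    static u32 *alloc_entries(size_t n)
    {
            /* count first, element size second -- mirrors kcalloc() */
            return kvcalloc(n, sizeof(u32), GFP_KERNEL);
    }
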
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 4f223c972c6a..0a0a11dc9ec0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1740,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched = &cli->sched;
+ args.sched = cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
index 4135690326f4..3a30bea30e36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
@@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
rm->flush = r535_bar_flush;
ret = gf100_bar_new_(rm, device, type, inst, &bar);
- *pbar = bar;
if (ret) {
- if (!bar)
- kfree(rm);
+ kfree(rm);
return ret;
}
+ *pbar = bar;
bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 19188683c8fc..8c2bf1c16f2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
+static void
+shadow_fw_release(void *fw)
+{
+ release_firmware(fw);
+}
+
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
- .fini = (void(*)(void *))release_firmware,
+ .fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
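
The shadow_fw_release() wrapper replaces a function-pointer cast. Calling release_firmware() through a `void (*)(void *)` pointer is undefined behaviour in C, and kernel Control Flow Integrity (CONFIG_CFI_CLANG) rejects indirect calls whose target type does not match the pointer type; a wrapper with the callback's exact signature keeps the call type-correct. Restated as a sketch:

    #include <linux/firmware.h>

    /* BAD:  .fini = (void (*)(void *))release_firmware;  -- mismatched type, trips CFI */

    /* GOOD: wrapper matching the expected void (*)(void *) callback type */
    static void fw_fini(void *fw)
    {
            release_firmware(fw);
    }
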
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 9ee58e2a0eb2..a73a5b589790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+{
+ if (mem->data) {
+ /*
+ * Poison the buffer to catch any unexpected access from
+ * GSP-RM if the buffer was prematurely freed.
+ */
+ memset(mem->data, 0xFF, mem->size);
+
+ dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+ memset(mem, 0, sizeof(*mem));
+ }
+}
+
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
+{
+ mem->size = size;
+ mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+ if (WARN_ON(!mem->data))
+ return -ENOMEM;
+
+ return 0;
+}
+
static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
@@ -1024,6 +1050,11 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
nvkm_inth_allow(&gsp->subdev.inth);
nvkm_wr32(device, 0x110004, 0x00000040);
+
+ /* Release the DMA buffers that were needed only for boot and init */
+ nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
+ nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+
return ret;
}
@@ -1078,7 +1109,6 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
if (IS_ERR(rpc))
return PTR_ERR(rpc);
- rpc->size = sizeof(*rpc);
rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
@@ -1094,6 +1124,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
strings += name_len;
str_offset += name_len;
}
+ rpc->size = str_offset;
return nvkm_gsp_rpc_wr(gsp, rpc, false);
}
@@ -1532,27 +1563,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
-static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
-{
- if (mem->data) {
- dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
- mem->data = NULL;
- }
-}
-
-static int
-nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
-{
- mem->size = size;
- mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
- if (WARN_ON(!mem->data))
- return -ENOMEM;
-
- return 0;
-}
-
-
static int
r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@@ -1938,20 +1948,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
* See kgspCreateRadix3_IMPL
*/
static int
-nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
+nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
{
u64 addr;
for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
u64 *ptes;
- int idx;
+ size_t bufsize;
+ int ret, idx;
- rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
- rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
- &rx3->mem[i].addr, GFP_KERNEL);
- if (WARN_ON(!rx3->mem[i].data))
- return -ENOMEM;
+ bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+ ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
+ if (ret)
+ return ret;
ptes = rx3->mem[i].data;
if (i == 2) {
@@ -1991,7 +2001,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
if (ret)
return ret;
- ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);
+ ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
if (ret)
return ret;
@@ -2150,6 +2160,13 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
mutex_destroy(&gsp->cmdq.mutex);
r535_gsp_dtor_fws(gsp);
+
+ nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+ nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
+ nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
+ nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
+ nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
+ nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
}
int
@@ -2194,7 +2211,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
memcpy(gsp->sig.data, data, size);
/* Build radix3 page table for ELF image. */
- ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+ ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
if (ret)
return ret;
@@ -2295,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
int ret;
+ bool enable_gsp = fwif->enable;
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
return -EINVAL;
if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
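
The relocated nvkm_gsp_mem_dtor() now poisons buffers before freeing them: if GSP-RM touches a buffer the driver already released, the 0xFF fill turns the stray access into obviously bogus data rather than silent corruption of reused memory. The core idea, sketched around the standard DMA API (hypothetical helper):

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    static void free_dma_poisoned(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t dma_addr)
    {
            /* catch late accesses from the coprocessor after the free */
            memset(cpu_addr, 0xFF, size);
            dma_free_coherent(dev, size, cpu_addr, dma_addr);
    }
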
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index dad938cf6dec..8f3783742208 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -539,6 +539,8 @@ config DRM_PANEL_RAYDIUM_RM692E5
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
help
Say Y here if you want to enable support for Raydium RM692E5-based
display panels, such as the one found in the Fairphone 5 smartphone.
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index ea5a85779382..f23d8832a1ad 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
.off_func = s6d7aa0_lsl080al02_off,
.drm_mode = &s6d7aa0_lsl080al02_mode,
.mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_flags = 0,
.has_backlight = false,
.use_passwd3 = false,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 2214cb09678c..d493ee735c73 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3948,6 +3948,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
static const struct panel_desc tianma_tm070jvhg33 = {
@@ -3960,6 +3961,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 85b3b4871a1d..fdd768bbd487 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1985,8 +1985,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
}
- if (!clock)
+ if (!clock) {
+ vop2_unlock(vop2);
return;
+ }
if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
!(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT))
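
The vop2 change is a lock-balance repair: an early return sat between vop2_lock() and vop2_unlock() without releasing the lock, so one failed enable would deadlock every later modeset. Every exit path between lock and unlock must drop the lock; sketched shape (`setup_failed` is a hypothetical condition):

    vop2_lock(vop2);

    if (setup_failed) {
            vop2_unlock(vop2);      /* the error path must release the lock too */
            return;
    }

    /* ... program the hardware ... */

    vop2_unlock(vop2);
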
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 550492a7a031..d442b893275b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1184,14 +1184,16 @@ static void drm_sched_run_job_work(struct work_struct *w)
if (READ_ONCE(sched->pause_submit))
return;
+ /* Find entity with a ready job */
entity = drm_sched_select_entity(sched);
if (!entity)
- return;
+ return; /* No more work */
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
complete_all(&entity->entity_idle);
- return; /* No more work */
+ drm_sched_run_job_queue(sched);
+ return;
}
s_fence = sched_job->s_fence;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ff36171c8fb7..03d1c76aec2d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -960,7 +960,8 @@ int host1x_client_iommu_attach(struct host1x_client *client)
* not the shared IOMMU domain, don't try to attach it to a different
* domain. This allows using the IOMMU-backed DMA API.
*/
- if (domain && domain != tegra->domain)
+ if (domain && domain->type != IOMMU_DOMAIN_IDENTITY &&
+ domain != tegra->domain)
return 0;
if (tegra->domain) {
@@ -1242,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
- err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
- if (err < 0)
- goto hub;
+ /*
+ * Only take over from a potential firmware framebuffer if any CRTCs
+ * have been registered. This must not be a fatal error because there
+ * are other accelerators that are exposed via this driver.
+ *
+ * Another case where this happens is on Tegra234 where the display
+ * hardware is no longer part of the host1x complex, so this driver
+ * will not expose any modesetting features.
+ */
+ if (drm->mode_config.num_crtc > 0) {
+ err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ if (err < 0)
+ goto hub;
+ } else {
+ /*
+ * Indicate to userspace that this doesn't expose any display
+ * capabilities.
+ */
+ drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ }
err = drm_dev_register(drm, 0);
if (err < 0)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index ea2af6bd9abe..be2d9d7764be 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -8,16 +8,308 @@
#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
+#include <linux/sizes.h>
#include <drm/drm_buddy.h>
#include "../lib/drm_random.h"
+static unsigned int random_seed;
+
static inline u64 get_size(int order, u64 chunk_size)
{
return (1 << order) * chunk_size;
}
+static void drm_test_buddy_alloc_range_bias(struct kunit *test)
+{
+ u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+ DRM_RND_STATE(prng, random_seed);
+ unsigned int i, count, *order;
+ struct drm_buddy mm;
+ LIST_HEAD(allocated);
+
+ bias_size = SZ_1M;
+ ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
+ ps = max(SZ_4K, ps);
+ mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
+
+ kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ count = mm_size / bias_size;
+ order = drm_random_order(count, &prng);
+ KUNIT_EXPECT_TRUE(test, order);
+
+ /*
+ * Idea is to split the address space into uniform bias ranges, and then
+ * in some random order allocate within each bias, using various
+ * patterns within. This should detect if allocations leak out from a
+ * given bias, for example.
+ */
+
+ for (i = 0; i < count; i++) {
+ LIST_HEAD(tmp);
+ u32 size;
+
+ bias_start = order[i] * bias_size;
+ bias_end = bias_start + bias_size;
+ bias_rem = bias_size;
+
+ /* internal round_up too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, bias_size,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+
+ /* size too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size + ps, ps);
+
+ /* bias range too small for size */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end, bias_size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end, bias_size, ps);
+
+ /* bias misaligned */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end - ps,
+ bias_size >> 1, bias_size >> 1,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
+
+ /* single big page */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* single page with internal round_up */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, ps, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, ps, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* random size within */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ if (size)
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+
+ bias_rem -= size;
+ /* too big for current avail */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_rem + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_rem + ps, ps);
+
+ if (bias_rem) {
+ /* random fill of the remainder */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ size = max(size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ /*
+ * Intentionally allow some space to be left
+ * unallocated, and ideally not always on the bias
+ * boundaries.
+ */
+ drm_buddy_free_list(&mm, &tmp);
+ } else {
+ list_splice_tail(&tmp, &allocated);
+ }
+ }
+
+ kfree(order);
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+
+ /*
+ * Something more free-form. Idea is to pick a random starting bias
+ * range within the address space and then start filling it up. Also
+ * randomly grow the bias range in both directions as we go along. This
+ * should give us bias start/end which is not always uniform like above,
+ * and in some cases will require the allocator to jump over already
+ * allocated nodes in the middle of the address space.
+ */
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+ bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+ bias_end = max(bias_end, bias_start + ps);
+ bias_rem = bias_end - bias_start;
+
+ do {
+ u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ bias_rem -= size;
+
+ /*
+ * Try to randomly grow the bias range in both directions, or
+ * only one, or perhaps don't grow at all.
+ */
+ do {
+ u32 old_bias_start = bias_start;
+ u32 old_bias_end = bias_end;
+
+ if (bias_start)
+ bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
+ if (bias_end != mm_size)
+ bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
+
+ bias_rem += old_bias_start - bias_start;
+ bias_rem += bias_end - old_bias_end;
+ } while (!bias_rem && (bias_start || bias_end != mm_size));
+ } while (bias_rem);
+
+ KUNIT_ASSERT_EQ(test, bias_start, 0);
+ KUNIT_ASSERT_EQ(test, bias_end, mm_size);
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
+ ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc passed with bias(%x-%x), size=%u\n",
+ bias_start, bias_end, ps);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_contiguous(struct kunit *test)
+{
+ const u32 ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
+ unsigned long i, n_pages, total;
+ struct drm_buddy_block *block;
+ struct drm_buddy mm;
+ LIST_HEAD(left);
+ LIST_HEAD(middle);
+ LIST_HEAD(right);
+ LIST_HEAD(allocated);
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+ /*
+ * Idea is to fragment the address space by alternating block
+ * allocations between three different lists; one for left, middle and
+ * right. We can then free a list to simulate fragmentation. In
+ * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
+ * including the try_harder path.
+ */
+
+ i = 0;
+ n_pages = mm_size / ps;
+ do {
+ struct list_head *list;
+ int slot = i % 3;
+
+ if (slot == 0)
+ list = &left;
+ else if (slot == 1)
+ list = &middle;
+ else
+ list = &right;
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list, 0),
+ "buddy_alloc hit an error size=%u\n",
+ ps);
+ } while (++i < n_pages);
+
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 3 * ps);
+
+ drm_buddy_free_list(&mm, &middle);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 3 * ps);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 2 * ps);
+
+ drm_buddy_free_list(&mm, &right);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%u\n", 3 * ps);
+ /*
+ * At this point we should have enough contiguous space for 2 blocks,
+ * however they are never buddies (since we freed middle and right) so
+ * will require the try_harder logic to find them.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", 2 * ps);
+
+ drm_buddy_free_list(&mm, &left);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%u\n", 3 * ps);
+
+ total = 0;
+ list_for_each_entry(block, &allocated, link)
+ total += drm_buddy_block_size(&mm, block);
+
+ KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
u64 mm_size, size, start = 0;
@@ -275,16 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_fini(&mm);
}
+static int drm_buddy_suite_init(struct kunit_suite *suite)
+{
+ while (!random_seed)
+ random_seed = get_random_u32();
+
+ kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
+ random_seed);
+
+ return 0;
+}
+
static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_limit),
KUNIT_CASE(drm_test_buddy_alloc_optimistic),
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
+ KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+ KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
static struct kunit_suite drm_buddy_test_suite = {
.name = "drm_buddy",
+ .suite_init = drm_buddy_suite_init,
.test_cases = drm_buddy_tests,
};
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 4e9247cf9977..1eb0c304f960 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -188,13 +188,13 @@ out:
static void drm_test_mm_debug(struct kunit *test)
{
+ struct drm_printer p = drm_debug_printer(test->name);
struct drm_mm mm;
struct drm_mm_node nodes[2];
/* Create a small drm_mm with a couple of nodes and a few holes, and
* check that the debug iterator doesn't explode over a trivial drm_mm.
*/
-
drm_mm_init(&mm, 0, 4096);
memset(nodes, 0, sizeof(nodes));
@@ -209,6 +209,9 @@ static void drm_test_mm_debug(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
"failed to reserve node[0] {start=%lld, size=%lld)\n",
nodes[0].start, nodes[0].size);
+
+ drm_mm_print(&mm, &p);
+ KUNIT_SUCCEED(test);
}
static bool expect_insert(struct kunit *test, struct drm_mm *mm,
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index f5187b384ae9..76027960054f 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -95,11 +95,17 @@ static int ttm_global_init(void)
ttm_pool_mgr_init(num_pages);
ttm_tt_mgr_init(num_pages, num_dma32);
- glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
+ __GFP_NOWARN);
+ /* Retry without GFP_DMA32 on platforms where DMA32 is not available */
if (unlikely(glob->dummy_read_page == NULL)) {
- ret = -ENOMEM;
- goto out;
+ glob->dummy_read_page = alloc_page(__GFP_ZERO);
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
}
INIT_LIST_HEAD(&glob->device_list);
@@ -195,7 +201,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bool use_dma_alloc, bool use_dma32)
{
struct ttm_global *glob = &ttm_glob;
- int ret;
+ int ret, nid;
if (WARN_ON(vma_manager == NULL))
return -EINVAL;
@@ -215,7 +221,12 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
ttm_sys_man_init(bdev);
- ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
+ if (dev)
+ nid = dev_to_node(dev);
+ else
+ nid = NUMA_NO_NODE;
+
+ ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index b62f420a9f96..112438d965ff 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
- struct page **pages = tt->pages;
+ struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index fcff41dd2315..88f63d526b22 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -147,6 +147,13 @@ v3d_job_allocate(void **container, size_t size)
return 0;
}
+static void
+v3d_job_deallocate(void **container)
+{
+ kfree(*container);
+ *container = NULL;
+}
+
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
struct v3d_job *job, void (*free)(struct kref *ref),
@@ -273,8 +280,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
ret = v3d_job_init(v3d, file_priv, &(*job)->base,
v3d_job_free, args->in_sync, se, V3D_CSD);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)job);
return ret;
+ }
ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
if (ret)
@@ -282,8 +291,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
ret = v3d_job_init(v3d, file_priv, *clean_job,
v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)clean_job);
return ret;
+ }
(*job)->args = *args;
@@ -860,8 +871,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &render->base,
v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&render);
goto fail;
+ }
render->start = args->rcl_start;
render->end = args->rcl_end;
@@ -874,8 +887,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&bin);
goto fail;
+ }
bin->start = args->bcl_start;
bin->end = args->bcl_end;
@@ -892,8 +907,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, clean_job,
v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&clean_job);
goto fail;
+ }
last_job = clean_job;
} else {
@@ -1015,8 +1032,10 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &job->base,
v3d_job_free, args->in_sync, &se, V3D_TFU);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&job);
goto fail;
+ }
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
sizeof(*job->base.bo), GFP_KERNEL);
@@ -1233,8 +1252,10 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
v3d_job_free, 0, &se, V3D_CPU);
- if (ret)
+ if (ret) {
+ v3d_job_deallocate((void *)&cpu_job);
goto fail;
+ }
clean_job = cpu_job->indirect_csd.clean_job;
csd_job = cpu_job->indirect_csd.job;
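The v3d fix above follows a common error-path pattern: v3d_job_init() does not free the job it was handed on failure, so each caller now releases its own allocation through v3d_job_deallocate(), which also clears the caller's pointer so a later cleanup pass cannot free it twice. A minimal sketch of the pattern, with example_init() and struct thing as hypothetical stand-ins:

    static int example_setup(struct thing **out)
    {
            int ret;

            *out = kzalloc(sizeof(**out), GFP_KERNEL);
            if (!*out)
                    return -ENOMEM;

            ret = example_init(*out);
            if (ret) {
                    kfree(*out);
                    *out = NULL;    /* later cleanup sees NULL, not a stale pointer */
                    return ret;
            }
            return 0;
    }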
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f8e9abe647b9..9539aa28937f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -94,6 +94,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
goto err_free;
}
+ dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 3062e0e0d467..79ba98a169f9 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -50,8 +50,8 @@
#define HOST2GUC_SELF_CFG_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffff << 16)
-#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffff << 0)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY (0xffffu << 16)
+#define HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN (0xffffu << 0)
#define HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn
#define HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index 811add10c30d..c165e26c0976 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -242,8 +242,8 @@ struct slpc_shared_data {
(HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \
HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8)
-#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xffu << 8)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xffu << 0)
#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn
#endif
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
index 3b83f907ece4..0b1146d0c997 100644
--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -82,11 +82,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
#define GUC_CTB_HDR_LEN 1u
#define GUC_CTB_MSG_MIN_LEN GUC_CTB_HDR_LEN
#define GUC_CTB_MSG_MAX_LEN 256u
-#define GUC_CTB_MSG_0_FENCE (0xffff << 16)
-#define GUC_CTB_MSG_0_FORMAT (0xf << 12)
+#define GUC_CTB_MSG_0_FENCE (0xffffu << 16)
+#define GUC_CTB_MSG_0_FORMAT (0xfu << 12)
#define GUC_CTB_FORMAT_HXG 0u
-#define GUC_CTB_MSG_0_RESERVED (0xf << 8)
-#define GUC_CTB_MSG_0_NUM_DWORDS (0xff << 0)
+#define GUC_CTB_MSG_0_RESERVED (0xfu << 8)
+#define GUC_CTB_MSG_0_NUM_DWORDS (0xffu << 0)
/**
* DOC: CTB HXG Message
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 47094b9b044c..0400bc0fccdc 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -31,9 +31,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
diff --git a/drivers/gpu/drm/xe/abi/guc_messages_abi.h b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
index 3d199016cf88..29e414c82d56 100644
--- a/drivers/gpu/drm/xe/abi/guc_messages_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_messages_abi.h
@@ -40,18 +40,18 @@
*/
#define GUC_HXG_MSG_MIN_LEN 1u
-#define GUC_HXG_MSG_0_ORIGIN (0x1 << 31)
+#define GUC_HXG_MSG_0_ORIGIN (0x1u << 31)
#define GUC_HXG_ORIGIN_HOST 0u
#define GUC_HXG_ORIGIN_GUC 1u
-#define GUC_HXG_MSG_0_TYPE (0x7 << 28)
+#define GUC_HXG_MSG_0_TYPE (0x7u << 28)
#define GUC_HXG_TYPE_REQUEST 0u
#define GUC_HXG_TYPE_EVENT 1u
#define GUC_HXG_TYPE_NO_RESPONSE_BUSY 3u
#define GUC_HXG_TYPE_NO_RESPONSE_RETRY 5u
#define GUC_HXG_TYPE_RESPONSE_FAILURE 6u
#define GUC_HXG_TYPE_RESPONSE_SUCCESS 7u
-#define GUC_HXG_MSG_0_AUX (0xfffffff << 0)
-#define GUC_HXG_MSG_n_PAYLOAD (0xffffffff << 0)
+#define GUC_HXG_MSG_0_AUX (0xfffffffu << 0)
+#define GUC_HXG_MSG_n_PAYLOAD (0xffffffffu << 0)
/**
* DOC: HXG Request
@@ -85,8 +85,8 @@
*/
#define GUC_HXG_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfff << 16)
-#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_REQUEST_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_REQUEST_MSG_0_ACTION (0xffffu << 0)
#define GUC_HXG_REQUEST_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/**
@@ -117,8 +117,8 @@
*/
#define GUC_HXG_EVENT_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfff << 16)
-#define GUC_HXG_EVENT_MSG_0_ACTION (0xffff << 0)
+#define GUC_HXG_EVENT_MSG_0_DATA0 (0xfffu << 16)
+#define GUC_HXG_EVENT_MSG_0_ACTION (0xffffu << 0)
#define GUC_HXG_EVENT_MSG_n_DATAn GUC_HXG_MSG_n_PAYLOAD
/**
@@ -188,8 +188,8 @@
*/
#define GUC_HXG_FAILURE_MSG_LEN GUC_HXG_MSG_MIN_LEN
-#define GUC_HXG_FAILURE_MSG_0_HINT (0xfff << 16)
-#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffff << 0)
+#define GUC_HXG_FAILURE_MSG_0_HINT (0xfffu << 16)
+#define GUC_HXG_FAILURE_MSG_0_ERROR (0xffffu << 0)
/**
* DOC: HXG Response
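The `u` suffixes added across these GuC ABI headers are more than style: with a plain int constant, `0x1 << 31` shifts into the sign bit, which is undefined behavior in C, and masks like `0xffff << 16` produce a negative int that sign-extends when widened, e.g. in FIELD_PREP()-style math against u64 values. A small stand-alone illustration (not from the patch; the first line shows what typical compilers produce):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bad  = (uint64_t)(0xffff << 16);  /* sign-extends to 0xffffffffffff0000 */
            uint64_t good = (uint64_t)(0xffffu << 16); /* stays 0x00000000ffff0000 */

            printf("%016llx\n%016llx\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }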
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
index 5f19550cc845..777c20ceabab 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -10,7 +10,7 @@
#include "xe_bo.h"
-#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT)
+#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */
static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
{
@@ -35,12 +35,10 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
u32 ofs, u64 *ptr, u32 size)
{
struct ttm_bo_kmap_obj map;
- void *virtual;
+ void *src;
bool is_iomem;
int ret;
- XE_WARN_ON(size != 8);
-
ret = xe_bo_lock(bo, true);
if (ret)
return ret;
@@ -50,11 +48,12 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
goto out_unlock;
ofs &= ~PAGE_MASK;
- virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+ src = ttm_kmap_obj_virtual(&map, &is_iomem);
+ src += ofs;
if (is_iomem)
- *ptr = readq((void __iomem *)(virtual + ofs));
+ memcpy_fromio(ptr, (void __iomem *)src, size);
else
- *ptr = *(u64 *)(virtual + ofs);
+ memcpy(ptr, src, size);
ttm_bo_kunmap(&map);
out_unlock:
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index ef56bd517b28..421b819fd4ba 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -21,4 +21,5 @@ kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index a53c22a19582..b4715b78ef3b 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -74,9 +74,6 @@ static const struct platform_test_case cases[] = {
SUBPLATFORM_CASE(DG2, G11, B1),
SUBPLATFORM_CASE(DG2, G12, A0),
SUBPLATFORM_CASE(DG2, G12, A1),
- PLATFORM_CASE(PVC, B0),
- PLATFORM_CASE(PVC, B1),
- PLATFORM_CASE(PVC, C0),
GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 0b0e262e2166..4d3b80ec906d 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -28,6 +28,14 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
+ [XE_PL_SYSTEM] = "system",
+ [XE_PL_TT] = "gtt",
+ [XE_PL_VRAM0] = "vram0",
+ [XE_PL_VRAM1] = "vram1",
+ [XE_PL_STOLEN] = "stolen"
+};
+
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
@@ -713,8 +721,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
migrate = xe->tiles[0].migrate;
xe_assert(xe, migrate);
-
- trace_xe_bo_move(bo);
+ trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
xe_device_mem_access_get(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9b1279aca127..8be42ac6cd07 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -243,6 +243,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
extern struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b8d8da546670..5176c27e4b6a 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -83,9 +83,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef);
-
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -102,8 +99,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&xef->exec_queue.lock);
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
- device_kill_persistent_exec_queues(xe, xef);
-
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
@@ -255,9 +250,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
- INIT_LIST_HEAD(&xe->persistent_engines.list);
-
spin_lock_init(&xe->pinned.lock);
INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
INIT_LIST_HEAD(&xe->pinned.external_vram);
@@ -570,37 +562,6 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- if (!list_empty(&q->persistent.link))
- list_del(&q->persistent.link);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef)
-{
- struct xe_exec_queue *q, *next;
-
- mutex_lock(&xe->persistent_engines.lock);
- list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
- persistent.link)
- if (q->persistent.xef == xef) {
- xe_exec_queue_kill(q);
- list_del_init(&q->persistent.link);
- }
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
@@ -613,7 +574,7 @@ void xe_device_wmb(struct xe_device *xe)
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
return xe_device_has_flat_ccs(xe) ?
- DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
+ DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}
bool xe_device_mem_access_ongoing(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 3da83b233206..08d8b72c7731 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q);
-
void xe_device_wmb(struct xe_device *xe);
static inline struct xe_file *to_xe_file(const struct drm_file *file)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 5dc9127a2029..e8491979a6f2 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -341,14 +341,6 @@ struct xe_device {
struct mutex lock;
} usm;
- /** @persistent_engines: engines that are closed but still running */
- struct {
- /** @lock: protects persistent engines */
- struct mutex lock;
- /** @list: list of persistent engines */
- struct list_head list;
- } persistent_engines;
-
/** @pinned: pinned BO state */
struct {
/** @lock: protected pinned BO list state */
diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/xe_display.c
index 74391d9b11ae..e4db069f0db3 100644
--- a/drivers/gpu/drm/xe/xe_display.c
+++ b/drivers/gpu/drm/xe/xe_display.c
@@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
int xe_display_init_nommio(struct xe_device *xe)
{
- int err;
-
if (!xe->info.enable_display)
return 0;
@@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(xe);
- err = intel_power_domains_init(xe);
- if (err)
- return err;
-
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 64ed303728fd..da2627ed6ae7 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -175,7 +175,7 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return 0;
}
-const struct dma_buf_ops xe_dmabuf_ops = {
+static const struct dma_buf_ops xe_dmabuf_ops = {
.attach = xe_dma_buf_attach,
.detach = xe_dma_buf_detach,
.pin = xe_dma_buf_pin,
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 82d1305e831f..6040e4d22b28 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
- static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
- [XE_PL_SYSTEM] = "system",
- [XE_PL_TT] = "gtt",
- [XE_PL_VRAM0] = "vram0",
- [XE_PL_VRAM1] = "vram1",
- [4 ... 6] = NULL,
- [XE_PL_STOLEN] = "stolen"
- };
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
struct xe_file *xef = file->driver_priv;
struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
spin_unlock(&client->bos_lock);
for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
- if (!mem_type_to_name[mem_type])
+ if (!xe_mem_type_to_name[mem_type])
continue;
man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
- mem_type_to_name[mem_type]);
+ xe_mem_type_to_name[mem_type]);
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index b853feed9ccc..17f26952e665 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -111,7 +111,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
- u32 i, num_syncs = 0;
+ u32 i, num_syncs = 0, num_ufence = 0;
struct xe_sched_job *job;
struct dma_fence *rebind_fence;
struct xe_vm *vm;
@@ -157,6 +157,14 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
SYNC_PARSE_FLAG_LR_MODE : 0));
if (err)
goto err_syncs;
+
+ if (xe_sync_is_ufence(&syncs[i]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+ err = -EINVAL;
+ goto err_syncs;
}
if (xe_exec_queue_is_parallel(q)) {
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index bcfc4127c7c5..49223026c89f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
- INIT_LIST_HEAD(&q->persistent.link);
INIT_LIST_HEAD(&q->compute.link);
INIT_LIST_HEAD(&q->multi_gt_link);
@@ -310,102 +309,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
return q->ops->set_timeslice(q, value);
}
-static int exec_queue_set_preemption_timeout(struct xe_device *xe,
- struct xe_exec_queue *q, u64 value,
- bool create)
-{
- u32 min = 0, max = 0;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_preempt_timeout(q, value);
-}
-
-static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
- return -EINVAL;
-
- if (value)
- q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
- else
- q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
-
- return 0;
-}
-
-static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- u32 min = 0, max = 0;
-
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_job_timeout(q, value);
-}
-
-static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_trigger = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_notify = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- if (value > DRM_XE_ACC_GRANULARITY_64M)
- return -EINVAL;
-
- q->usm.acc_granularity = value;
-
- return 0;
-}
-
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value, bool create);
@@ -413,12 +316,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -437,10 +334,15 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, ext.property >=
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
- XE_IOCTL_DBG(xe, ext.pad))
+ XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+ if (!exec_queue_set_property_funcs[idx])
+ return -EINVAL;
+
return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}
@@ -704,9 +606,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe,
- xe_vm_in_lr_mode(vm) ? 0 :
- EXEC_QUEUE_FLAG_PERSISTENT);
+ args->width, hwe, 0);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(q))
@@ -728,8 +628,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
goto kill_exec_queue;
}
- q->persistent.xef = xef;
-
mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock);
@@ -872,10 +770,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
- if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
- xe_exec_queue_kill(q);
- else
- xe_device_add_persistent_exec_queues(xe, q);
+ xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);
@@ -926,20 +821,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
* @q: The exec queue
* @vm: The VM the engine does a bind or exec for
*
- * Get last fence, does not take a ref
+ * Get last fence, takes a ref
*
* Returns: last fence if not signaled, dma fence stub if signaled
*/
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
struct xe_vm *vm)
{
+ struct dma_fence *fence;
+
xe_exec_queue_last_fence_lockdep_assert(q, vm);
if (q->last_fence &&
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
xe_exec_queue_last_fence_put(q, vm);
- return q->last_fence ? q->last_fence : dma_fence_get_stub();
+ fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 8d4b7feb8c30..36f4901d8d7e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -105,16 +105,6 @@ struct xe_exec_queue {
struct xe_guc_exec_queue *guc;
};
- /**
- * @persistent: persistent exec queue state
- */
- struct {
- /** @xef: file which this exec queue belongs to */
- struct xe_file *xef;
- /** @link: link in list of persistent exec queues */
- struct list_head link;
- } persistent;
-
union {
/**
* @parallel: parallel submission state
@@ -160,16 +150,6 @@ struct xe_exec_queue {
spinlock_t lock;
} compute;
- /** @usm: unified shared memory state */
- struct {
- /** @acc_trigger: access counter trigger */
- u32 acc_trigger;
- /** @acc_notify: access counter notify */
- u32 acc_notify;
- /** @acc_granularity: access counter granularity */
- u32 acc_granularity;
- } usm;
-
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 96b5224eb478..acb4d9f38fd7 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
{
struct xe_execlist_port *port = exl->port;
- enum xe_exec_queue_priority priority = exl->active_priority;
+ enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
XE_WARN_ON(priority < 0);
@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(xe, q);
drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched);
kfree(exl);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3af2adec1295..35474ddbaf97 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
	 * USM has its own SA pool so it does not block behind user operations
*/
if (gt_to_xe(gt)->info.has_usm) {
- gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+ struct xe_device *xe = gt_to_xe(gt);
+
+ gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+ IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
if (IS_ERR(gt->usm.bb_pool)) {
err = PTR_ERR(gt->usm.bb_pool);
goto err_force_wake;
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9358f7336889..9fcae65b6469 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
}
if (xe_gt_is_media_type(gt)) {
- sprintf(gtidle->name, "gt%d-mc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
} else {
- sprintf(gtidle->name, "gt%d-rc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-rc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_rc6_residency;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 77925b35cf8d..8546cd3cc50d 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -480,7 +480,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
* to synchronize with external clients (e.g., firmware), so a semaphore
* register will also need to be taken.
*/
-static void mcr_lock(struct xe_gt *gt)
+static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
{
struct xe_device *xe = gt_to_xe(gt);
int ret = 0;
@@ -500,7 +500,7 @@ static void mcr_lock(struct xe_gt *gt)
drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
}
-static void mcr_unlock(struct xe_gt *gt)
+static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
{
/* Release hardware semaphore - this is done by writing 1 to the register */
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 59a70d2e0a7a..73f08f1924df 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -165,7 +165,8 @@ retry_userptr:
goto unlock_vm;
}
- if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) {
+ if (!xe_vma_is_userptr(vma) ||
+ !xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
downgrade_write(&vm->lock);
write_locked = false;
}
@@ -181,11 +182,13 @@ retry_userptr:
/* TODO: Validate fault */
if (xe_vma_is_userptr(vma) && write_locked) {
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+
spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&vma->userptr.invalidate_link);
+ list_del_init(&uvma->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
- ret = xe_vma_userptr_pin_pages(vma);
+ ret = xe_vma_userptr_pin_pages(uvma);
if (ret)
goto unlock_vm;
@@ -220,7 +223,7 @@ retry_userptr:
dma_fence_put(fence);
if (xe_vma_is_userptr(vma))
- ret = xe_vma_userptr_check_repin(vma);
+ ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
vma->usm.tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
@@ -332,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
return -EPROTO;
asid = FIELD_GET(PFD_ASID, msg[1]);
- pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE];
+ pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 7eef23a00d77..f4c485289dbe 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -247,6 +247,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
xe_gt_assert(gt, vma);
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist) {
+ if (fence)
+ __invalidation_fence_signal(fence);
+
+ return 0;
+ }
+
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
if (!xe->info.has_range_tlb_invalidation) {
@@ -317,6 +325,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
struct drm_printer p = drm_err_printer(__func__);
int ret;
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist)
+ return 0;
+
/*
* XXX: See above, this algorithm only works if seqno are always in
* order
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index f71085228cb3..d91702592520 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -963,7 +963,9 @@ void xe_guc_pc_fini(struct xe_guc_pc *pc)
struct xe_device *xe = pc_to_xe(pc);
if (xe->info.skip_guc_pc) {
+ xe_device_mem_access_get(xe);
xe_gt_idle_disable_c6(pc_to_gt(pc));
+ xe_device_mem_access_put(xe);
return;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 54ffcfcdd41f..f22ae717b0b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1028,8 +1028,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index a6094c81f2ad..a5de3e7b0bd6 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -217,13 +217,13 @@ struct xe_hw_fence *xe_hw_fence_create(struct xe_hw_fence_ctx *ctx,
if (!fence)
return ERR_PTR(-ENOMEM);
- dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
- ctx->dma_fence_ctx, ctx->next_seqno++);
-
fence->ctx = ctx;
fence->seqno_map = seqno_map;
INIT_LIST_HEAD(&fence->irq_link);
+ dma_fence_init(&fence->dma, &xe_hw_fence_ops, &ctx->irq->lock,
+ ctx->dma_fence_ctx, ctx->next_seqno++);
+
trace_xe_hw_fence_create(fence);
return fence;
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 6ef2aa1eae8b..174ed2185481 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -419,7 +419,7 @@ static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0),
- uval, 0);
+ uval, NULL);
}
static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index b7fa3831b684..b38319d2801e 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -21,10 +21,10 @@
#include "xe_map.h"
#include "xe_vm.h"
-#define CTX_VALID (1 << 0)
-#define CTX_PRIVILEGE (1 << 8)
-#define CTX_ADDRESSING_MODE_SHIFT 3
-#define LEGACY_64B_CONTEXT 3
+#define LRC_VALID (1 << 0)
+#define LRC_PRIVILEGE (1 << 8)
+#define LRC_ADDRESSING_MODE_SHIFT 3
+#define LRC_LEGACY_64B_CONTEXT 3
#define ENGINE_CLASS_SHIFT 61
#define ENGINE_INSTANCE_SHIFT 48
@@ -682,8 +682,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
-#define ACC_GRANULARITY_S 20
-#define ACC_NOTIFY_S 16
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -754,23 +752,17 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
if (xe->info.has_asid && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
- (q->usm.acc_granularity <<
- ACC_GRANULARITY_S) | vm->usm.asid);
- if (xe->info.has_usm && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
- (q->usm.acc_notify << ACC_NOTIFY_S) |
- q->usm.acc_trigger);
-
- lrc->desc = CTX_VALID;
- lrc->desc |= LEGACY_64B_CONTEXT << CTX_ADDRESSING_MODE_SHIFT;
+ xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
+
+ lrc->desc = LRC_VALID;
+ lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
/* TODO: Priority */
/* While this appears to have something about privileged batches or
* some such, it really just means PPGTT mode.
*/
if (vm)
- lrc->desc |= CTX_PRIVILEGE;
+ lrc->desc |= LRC_PRIVILEGE;
if (GRAPHICS_VERx100(xe) < 1250) {
lrc->desc |= (u64)hwe->instance << ENGINE_INSTANCE_SHIFT;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index e05e9e7282b6..70480c305602 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -170,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
- if (xe->info.has_usm) {
- batch = tile->primary_gt->usm.bb_pool->bo;
- m->usm_batch_base_ofs = m->batch_base_ofs;
- }
-
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
@@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
entry);
level++;
}
+ if (xe->info.has_usm) {
+ xe_tile_assert(tile, batch->size == SZ_1M);
+
+ batch = tile->primary_gt->usm.bb_pool->bo;
+ m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+ xe_tile_assert(tile, batch->size == SZ_512K);
+
+ for (i = 0; i < batch->size;
+ i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+ XE_PAGE_SIZE) {
+ entry = vm->pt_ops->pte_encode_bo(batch, i,
+ pat_index, 0);
+
+ xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+ entry);
+ level++;
+ }
+ }
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
@@ -472,7 +485,7 @@ static void emit_pte(struct xe_migrate *m,
/* Indirect access needs compression enabled uncached PAT index */
if (GRAPHICS_VERx100(xe) >= 2000)
pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
- xe->pat.idx[XE_CACHE_NONE];
+ xe->pat.idx[XE_CACHE_WB];
else
pat_index = xe->pat.idx[XE_CACHE_WB];
@@ -760,14 +773,14 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
xe_res_next(&src_it, src_L0);
else
- emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
- src);
+ emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
+ &src_it, src_L0, src);
if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
xe_res_next(&dst_it, src_L0);
else
- emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
- dst);
+ emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
+ &dst_it, src_L0, dst);
if (copy_system_ccs)
emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
@@ -1009,8 +1022,8 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
xe_res_next(&src_it, clear_L0);
else
- emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
- dst);
+ emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+ &src_it, clear_L0, dst);
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
@@ -1204,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
}
if (q) {
fence = xe_exec_queue_last_fence_get(q, vm);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ dma_fence_put(fence);
return false;
+ }
+ dma_fence_put(fence);
}
return true;
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index c8c5d74b6e90..02f7808f28ca 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -105,7 +105,7 @@ static void xe_resize_vram_bar(struct xe_device *xe)
pci_bus_for_each_resource(root, root_res, i) {
if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
- root_res->start > 0x100000000ull)
+ (u64)root_res->start > 0x100000000ul)
break;
}
@@ -272,8 +272,8 @@ int xe_mmio_probe_vram(struct xe_device *xe)
drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
- &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + tile->mem.vram.actual_physical_size,
- &tile->mem.vram.io_start, tile->mem.vram.io_start + tile->mem.vram.io_size);
+ &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
+ &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
/* calculate total size using tile size to get the correct HW sizing */
total_size += tile_size;
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index de1030a47588..6653c045f3c9 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -20,8 +20,8 @@
struct xe_pt_dir {
struct xe_pt pt;
- /** @dir: Directory structure for the xe_pt_walk functionality */
- struct xe_ptw_dir dir;
+ /** @children: Array of page-table child nodes */
+ struct xe_ptw *children[XE_PDES];
};
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -44,7 +44,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->dir.entries[index], struct xe_pt, base);
+ return container_of(pt_dir->children[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -65,6 +65,14 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
XE_PTE_NULL;
}
+static void xe_pt_free(struct xe_pt *pt)
+{
+ if (pt->level)
+ kfree(as_xe_pt_dir(pt));
+ else
+ kfree(pt);
+}
+
/**
* xe_pt_create() - Create a page-table.
* @vm: The vm to create for.
@@ -85,15 +93,19 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
- size_t size;
int err;
- size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) +
- XE_PDES * sizeof(struct xe_ptw *);
- pt = kzalloc(size, GFP_KERNEL);
+ if (level) {
+ struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+
+ pt = (dir) ? &dir->pt : NULL;
+ } else {
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ }
if (!pt)
return ERR_PTR(-ENOMEM);
+ pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -106,8 +118,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
goto err_kfree;
}
pt->bo = bo;
- pt->level = level;
- pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
+ pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -116,7 +127,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
return pt;
err_kfree:
- kfree(pt);
+ xe_pt_free(pt);
return ERR_PTR(err);
}
@@ -193,7 +204,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
deferred);
}
}
- kfree(pt);
+ xe_pt_free(pt);
}
/**
@@ -358,7 +369,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
struct iosys_map *map = &parent->bo->vmap;
if (unlikely(xe_child))
- parent->base.dir->entries[offset] = &xe_child->base;
+ parent->base.children[offset] = &xe_child->base;
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -488,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
* this device *requires* 64K PTE size for VRAM, fail.
*/
if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
pte |= XE_PTE_PS64;
- else if (XE_WARN_ON(xe_walk->needs_64K))
+ } else if (XE_WARN_ON(xe_walk->needs_64K)) {
return -EINVAL;
+ }
}
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
@@ -534,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
*child = &xe_child->base;
/*
- * Prefer the compact pagetable layout for L0 if possible.
+ * Prefer the compact pagetable layout for L0 if possible. Only
+ * possible if VMA covers entire 2MB region as compact 64k and
+ * 4k pages cannot be mixed within a 2MB region.
* TODO: Suballocate the pt bo to avoid wasting a lot of
* memory.
*/
if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
flags |= XE_PDE_64K;
xe_child->is_compact = true;
}
@@ -618,8 +634,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (!xe_vma_is_null(vma)) {
if (xe_vma_is_userptr(vma))
- xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
- &curs);
+ xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+ xe_vma_size(vma), &curs);
else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
@@ -853,7 +869,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
xe_pt_destroy(xe_pt_entry(pt_dir, j_),
xe_vma_vm(vma)->flags, deferred);
- pt_dir->dir.entries[j_] = &newpte->base;
+ pt_dir->children[j_] = &newpte->base;
}
kfree(entries[i].pt_entries);
}
@@ -906,17 +922,17 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
- u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2;
+ u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
static u32 count;
if (count++ % divisor == divisor - 1) {
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- vma->userptr.divisor = divisor << 1;
+ uvma->userptr.divisor = divisor << 1;
spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&vma->userptr.invalidate_link,
+ list_move_tail(&uvma->userptr.invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
return true;
@@ -927,7 +943,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
#else
-static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
return false;
}
@@ -1000,9 +1016,9 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
struct xe_pt_migrate_pt_update *userptr_update =
container_of(pt_update, typeof(*userptr_update), base);
- struct xe_vma *vma = pt_update->vma;
- unsigned long notifier_seq = vma->userptr.notifier_seq;
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
+ unsigned long notifier_seq = uvma->userptr.notifier_seq;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
int err = xe_pt_vm_dependencies(pt_update->job,
&vm->rftree[pt_update->tile_id],
pt_update->start,
@@ -1023,7 +1039,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
*/
do {
down_read(&vm->userptr.notifier_lock);
- if (!mmu_interval_read_retry(&vma->userptr.notifier,
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
notifier_seq))
break;
@@ -1032,11 +1048,11 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
if (userptr_update->bind)
return -EAGAIN;
- notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
+ notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
} while (true);
/* Inject errors to test whether they are handled correctly */
- if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) {
+ if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
up_read(&vm->userptr.notifier_lock);
return -EAGAIN;
}
@@ -1297,7 +1313,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
vma->tile_present |= BIT(tile->id);
if (bind_pt_update.locked) {
- vma->userptr.initial_bind = true;
+ to_userptr_vma(vma)->userptr.initial_bind = true;
up_read(&vm->userptr.notifier_lock);
xe_bo_put_commit(&deferred);
}
@@ -1507,7 +1523,7 @@ xe_pt_commit_unbind(struct xe_vma *vma,
xe_pt_destroy(xe_pt_entry(pt_dir, i),
xe_vma_vm(vma)->flags, deferred);
- pt_dir->dir.entries[i] = NULL;
+ pt_dir->children[i] = NULL;
}
}
}
@@ -1642,7 +1658,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
if (!vma->tile_present) {
spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&vma->userptr.invalidate_link);
+ list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
}
up_read(&vm->userptr.notifier_lock);
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index 8f6c8d063f39..b8b3d2aea492 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,7 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->dir ? parent->dir->entries : NULL;
+ struct xe_ptw **entries = parent->children ? parent->children : NULL;
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index ec3d1e9efa6d..5ecc4d2f0f65 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -8,28 +8,15 @@
#include <linux/pagewalk.h>
#include <linux/types.h>
-struct xe_ptw_dir;
-
/**
* struct xe_ptw - base class for driver pagetable subclassing.
- * @dir: Pointer to an array of children if any.
+ * @children: Pointer to an array of children if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
- * embed the xe_ptw_dir::entries array in the same allocation.
+ * embed an array of xe_ptw pointers.
*/
struct xe_ptw {
- struct xe_ptw_dir *dir;
-};
-
-/**
- * struct xe_ptw_dir - page directory structure
- * @entries: Array holding page directory children.
- *
- * It is the responsibility of the user to ensure @entries is
- * correctly sized.
- */
-struct xe_ptw_dir {
- struct xe_ptw *entries[0];
+ struct xe_ptw **children;
};
/**
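With xe_ptw_dir removed, a directory-level page table now embeds a fixed-size child array and points xe_ptw::children at it, exactly as struct xe_pt_dir does in xe_pt.c above; leaf tables simply leave children NULL. A sketch of the subclassing layout the header now expects (my_dir is a hypothetical subclass):

    struct my_dir {
            struct xe_ptw base;                /* base.children points below */
            struct xe_ptw *children[XE_PDES];  /* one allocation covers both */
    };

    static struct xe_ptw *my_dir_create(void)
    {
            struct my_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);

            if (!dir)
                    return NULL;
            dir->base.children = dir->children;
            return &dir->base;
    }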
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 9b35673b286c..7e924faeeea0 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -459,21 +459,21 @@ static size_t calc_topo_query_size(struct xe_device *xe)
sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
}
-static void __user *copy_mask(void __user *ptr,
- struct drm_xe_query_topology_mask *topo,
- void *mask, size_t mask_size)
+static int copy_mask(void __user **ptr,
+ struct drm_xe_query_topology_mask *topo,
+ void *mask, size_t mask_size)
{
topo->num_bytes = mask_size;
- if (copy_to_user(ptr, topo, sizeof(*topo)))
- return ERR_PTR(-EFAULT);
- ptr += sizeof(topo);
+ if (copy_to_user(*ptr, topo, sizeof(*topo)))
+ return -EFAULT;
+ *ptr += sizeof(topo);
- if (copy_to_user(ptr, mask, mask_size))
- return ERR_PTR(-EFAULT);
- ptr += mask_size;
+ if (copy_to_user(*ptr, mask, mask_size))
+ return -EFAULT;
+ *ptr += mask_size;
- return ptr;
+ return 0;
}
static int query_gt_topology(struct xe_device *xe,
@@ -493,28 +493,28 @@ static int query_gt_topology(struct xe_device *xe,
}
for_each_gt(gt, xe, id) {
+ int err;
+
topo.gt_id = id;
topo.type = DRM_XE_TOPO_DSS_GEOMETRY;
- query_ptr = copy_mask(query_ptr, &topo,
- gt->fuse_topo.g_dss_mask,
- sizeof(gt->fuse_topo.g_dss_mask));
- if (IS_ERR(query_ptr))
- return PTR_ERR(query_ptr);
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.g_dss_mask,
+ sizeof(gt->fuse_topo.g_dss_mask));
+ if (err)
+ return err;
topo.type = DRM_XE_TOPO_DSS_COMPUTE;
- query_ptr = copy_mask(query_ptr, &topo,
- gt->fuse_topo.c_dss_mask,
- sizeof(gt->fuse_topo.c_dss_mask));
- if (IS_ERR(query_ptr))
- return PTR_ERR(query_ptr);
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.c_dss_mask,
+ sizeof(gt->fuse_topo.c_dss_mask));
+ if (err)
+ return err;
topo.type = DRM_XE_TOPO_EU_PER_DSS;
- query_ptr = copy_mask(query_ptr, &topo,
- gt->fuse_topo.eu_mask_per_dss,
- sizeof(gt->fuse_topo.eu_mask_per_dss));
- if (IS_ERR(query_ptr))
- return PTR_ERR(query_ptr);
+ err = copy_mask(&query_ptr, &topo,
+ gt->fuse_topo.eu_mask_per_dss,
+ sizeof(gt->fuse_topo.eu_mask_per_dss));
+ if (err)
+ return err;
}
return 0;
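Reworking copy_mask() to return an int and advance the caller's cursor through a double pointer avoids round-tripping a __user address through ERR_PTR()/IS_ERR(), which is presumably the motivation here: those helpers are defined for kernel pointers in the top error range, and funnelling a __user pointer through them loses the address-space annotation and conflates two unrelated value spaces. Each call site then collapses to the usual int-error shape:

    err = copy_mask(&query_ptr, &topo, mask, mask_size);
    if (err)
            return err;     /* query_ptr is only meaningful on success */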
diff --git a/drivers/gpu/drm/xe/xe_range_fence.c b/drivers/gpu/drm/xe/xe_range_fence.c
index d35d9ec58e86..372378e89e98 100644
--- a/drivers/gpu/drm/xe/xe_range_fence.c
+++ b/drivers/gpu/drm/xe/xe_range_fence.c
@@ -151,6 +151,11 @@ xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last)
return xe_range_fence_tree_iter_next(rfence, start, last);
}
+static void xe_range_fence_free(struct xe_range_fence *rfence)
+{
+ kfree(rfence);
+}
+
const struct xe_range_fence_ops xe_range_fence_kfree_ops = {
- .free = (void (*)(struct xe_range_fence *rfence)) kfree,
+ .free = xe_range_fence_free,
};
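Replacing the cast of kfree() with a typed xe_range_fence_free() wrapper matters under Control Flow Integrity: an indirect call through a function pointer whose prototype does not match the callee (kfree() takes const void *, the ops slot wants struct xe_range_fence *) is undefined and trips kCFI/clang-CFI checks at runtime. The same thunk pattern applies to any ops-table destructor; a hedged sketch with a hypothetical struct widget:

    struct widget_ops {
            void (*free)(struct widget *w);
    };

    static void widget_free(struct widget *w)
    {
            kfree(w);       /* prototypes match: CFI-safe indirect call */
    }

    static const struct widget_ops widget_kfree_ops = {
            .free = widget_free,
    };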
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 01106a1156ad..4e2ccad0e52f 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
struct dma_fence *fence;
fence = xe_exec_queue_last_fence_get(job->q, vm);
- dma_fence_get(fence);
return drm_sched_job_add_dependency(&job->drm, fence);
}
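This one-line removal pairs with the xe_exec_queue_last_fence_get() change above: the getter now returns its own reference, and drm_sched_job_add_dependency() consumes one, so the extra dma_fence_get() here would leak. Callers that merely inspect the fence must now drop it themselves, as the no_in_syncs() hunk in xe_migrate.c does; a hedged sketch of the balanced pattern:

    struct dma_fence *fence = xe_exec_queue_last_fence_get(q, vm);
    bool signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);

    dma_fence_put(fence);   /* the getter took a reference on our behalf */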
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index e4c220cf9115..02c9577fe418 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -19,7 +19,7 @@
#include "xe_macros.h"
#include "xe_sched_job_types.h"
-struct user_fence {
+struct xe_user_fence {
struct xe_device *xe;
struct kref refcount;
struct dma_fence_cb cb;
@@ -27,31 +27,32 @@ struct user_fence {
struct mm_struct *mm;
u64 __user *addr;
u64 value;
+ int signalled;
};
static void user_fence_destroy(struct kref *kref)
{
- struct user_fence *ufence = container_of(kref, struct user_fence,
+ struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
refcount);
mmdrop(ufence->mm);
kfree(ufence);
}
-static void user_fence_get(struct user_fence *ufence)
+static void user_fence_get(struct xe_user_fence *ufence)
{
kref_get(&ufence->refcount);
}
-static void user_fence_put(struct user_fence *ufence)
+static void user_fence_put(struct xe_user_fence *ufence)
{
kref_put(&ufence->refcount, user_fence_destroy);
}
-static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
- u64 value)
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+ u64 value)
{
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
@@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
static void user_fence_worker(struct work_struct *w)
{
- struct user_fence *ufence = container_of(w, struct user_fence, worker);
+ struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
@@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w)
}
wake_up_all(&ufence->xe->ufence_wq);
+ WRITE_ONCE(ufence->signalled, 1);
user_fence_put(ufence);
}
-static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
INIT_WORK(&ufence->worker, user_fence_worker);
queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- struct user_fence *ufence = container_of(cb, struct user_fence, cb);
+ struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
kick_ufence(ufence, fence);
}
@@ -307,7 +309,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
/* Easy case... */
if (!num_in_fence) {
fence = xe_exec_queue_last_fence_get(q, vm);
- dma_fence_get(fence);
return fence;
}
@@ -322,7 +323,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
}
}
fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
- dma_fence_get(fences[current_fence - 1]);
cf = dma_fence_array_create(num_in_fence, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
@@ -342,3 +342,39 @@ err_out:
return ERR_PTR(-ENOMEM);
}
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+ user_fence_get(sync->ufence);
+
+ return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+ user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 if not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+ return READ_ONCE(ufence->signalled);
+}
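
For context, a minimal sketch of how a caller is expected to use the new reference-counted helpers; the vma->ufence field mirrors the xe_vm.c hunks further down, but the function itself is illustrative:

	/* Illustrative caller: refuse teardown until the user fence signals. */
	static int example_unbind(struct xe_vma *vma)
	{
		if (vma->ufence) {
			struct xe_user_fence *f = vma->ufence;

			/* status stays 0 until user_fence_worker() has run */
			if (!xe_sync_ufence_get_status(f))
				return -EBUSY;

			vma->ufence = NULL;
			xe_sync_ufence_put(f);	/* drop the ref from xe_sync_ufence_get() */
		}
		return 0;
	}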
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index d284afbe917c..0fd0d51208e6 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -33,4 +33,13 @@ struct dma_fence *
xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
struct xe_exec_queue *q, struct xe_vm *vm);
+static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
+{
+ return !!sync->ufence;
+}
+
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
+void xe_sync_ufence_put(struct xe_user_fence *ufence);
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h
index 852db5e7884f..30ac3f51993b 100644
--- a/drivers/gpu/drm/xe/xe_sync_types.h
+++ b/drivers/gpu/drm/xe/xe_sync_types.h
@@ -18,7 +18,7 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
u32 type;
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 95163c303f3e..4ddc55527f9a 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -12,6 +12,7 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
+#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(u64, fence)
+ __field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
- __entry->fence = (u64)fence;
+ __entry->fence = fence;
__entry->seqno = fence->seqno;
),
- TP_printk("fence=0x%016llx, seqno=%d",
+ TP_printk("fence=%p, seqno=%d",
__entry->fence, __entry->seqno)
);
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_STRUCT__entry(
__field(size_t, size)
__field(u32, flags)
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
),
TP_fast_assign(
__entry->size = bo->size;
__entry->flags = bo->flags;
- __entry->vm = (unsigned long)bo->vm;
+ __entry->vm = bo->vm;
),
- TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+ TP_printk("size=%zu, flags=0x%02x, vm=%p",
__entry->size, __entry->flags, __entry->vm)
);
@@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
-DEFINE_EVENT(xe_bo, xe_bo_move,
- TP_PROTO(struct xe_bo *bo),
- TP_ARGS(bo)
+TRACE_EVENT(xe_bo_move,
+ TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
+ bool move_lacks_source),
+ TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
+ TP_STRUCT__entry(
+ __field(struct xe_bo *, bo)
+ __field(size_t, size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ __array(char, device_id, 12)
+ __field(bool, move_lacks_source)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->size = bo->size;
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+ __entry->move_lacks_source = move_lacks_source;
+ ),
+ TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+ __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+ xe_mem_type_to_name[__entry->old_placement],
+ xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);
DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_STRUCT__entry(
__field(u64, ctx)
__field(u32, seqno)
- __field(u64, fence)
+ __field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
- __entry->fence = (unsigned long)fence;
+ __entry->fence = fence;
),
- TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+ TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
__entry->ctx, __entry->fence, __entry->seqno)
);
@@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_ARGS(vma),
TP_STRUCT__entry(
- __field(u64, vma)
+ __field(struct xe_vma *, vma)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -373,14 +396,14 @@ DECLARE_EVENT_CLASS(xe_vma,
),
TP_fast_assign(
- __entry->vma = (unsigned long)vma;
+ __entry->vma = vma;
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+ TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
__entry->vma, __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
),
TP_fast_assign(
- __entry->vm = (unsigned long)vm;
+ __entry->vm = vm;
__entry->asid = vm->usm.asid;
),
- TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
+ TP_printk("vm=%p, asid=0x%05x", __entry->vm,
__entry->asid)
);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 10b6995fbf29..3b21afe5b488 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -37,8 +37,6 @@
#include "generated/xe_wa_oob.h"
#include "xe_wa.h"
-#define TEST_VM_ASYNC_OPS_ERROR
-
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
return vm->gpuvm.r_obj;
@@ -46,7 +44,7 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
/**
* xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @vma: The userptr vma
+ * @uvma: The userptr vma
*
* Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
@@ -56,15 +54,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
*
* Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
*/
-int xe_vma_userptr_check_repin(struct xe_vma *vma)
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
- return mmu_interval_check_retry(&vma->userptr.notifier,
- vma->userptr.notifier_seq) ?
+ return mmu_interval_check_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq) ?
-EAGAIN : 0;
}
-int xe_vma_userptr_pin_pages(struct xe_vma *vma)
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_device *xe = vm->xe;
const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
@@ -80,30 +80,30 @@ retry:
if (vma->gpuva.flags & XE_VMA_DESTROYED)
return 0;
- notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
- if (notifier_seq == vma->userptr.notifier_seq)
+ notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+ if (notifier_seq == userptr->notifier_seq)
return 0;
pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return -ENOMEM;
- if (vma->userptr.sg) {
+ if (userptr->sg) {
dma_unmap_sgtable(xe->drm.dev,
- vma->userptr.sg,
+ userptr->sg,
read_only ? DMA_TO_DEVICE :
DMA_BIDIRECTIONAL, 0);
- sg_free_table(vma->userptr.sg);
- vma->userptr.sg = NULL;
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
}
pinned = ret = 0;
if (in_kthread) {
- if (!mmget_not_zero(vma->userptr.notifier.mm)) {
+ if (!mmget_not_zero(userptr->notifier.mm)) {
ret = -EFAULT;
goto mm_closed;
}
- kthread_use_mm(vma->userptr.notifier.mm);
+ kthread_use_mm(userptr->notifier.mm);
}
while (pinned < num_pages) {
@@ -112,43 +112,40 @@ retry:
num_pages - pinned,
read_only ? 0 : FOLL_WRITE,
&pages[pinned]);
- if (ret < 0) {
- if (in_kthread)
- ret = 0;
+ if (ret < 0)
break;
- }
pinned += ret;
ret = 0;
}
if (in_kthread) {
- kthread_unuse_mm(vma->userptr.notifier.mm);
- mmput(vma->userptr.notifier.mm);
+ kthread_unuse_mm(userptr->notifier.mm);
+ mmput(userptr->notifier.mm);
}
mm_closed:
if (ret)
goto out;
- ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+ ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
pinned, 0,
(u64)pinned << PAGE_SHIFT,
xe_sg_segment_size(xe->drm.dev),
GFP_KERNEL);
if (ret) {
- vma->userptr.sg = NULL;
+ userptr->sg = NULL;
goto out;
}
- vma->userptr.sg = &vma->userptr.sgt;
+ userptr->sg = &userptr->sgt;
- ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
+ ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
read_only ? DMA_TO_DEVICE :
DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC |
DMA_ATTR_NO_KERNEL_MAPPING);
if (ret) {
- sg_free_table(vma->userptr.sg);
- vma->userptr.sg = NULL;
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
goto out;
}
@@ -167,8 +164,8 @@ out:
kvfree(pages);
if (!(ret < 0)) {
- vma->userptr.notifier_seq = notifier_seq;
- if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
+ userptr->notifier_seq = notifier_seq;
+ if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
goto retry;
}
@@ -635,7 +632,9 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
- struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
+ struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+ struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_vma *vma = &uvma->vma;
struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -651,7 +650,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
mmu_interval_set_seq(mni, cur_seq);
/* No need to stop gpu access if the userptr is not yet bound. */
- if (!vma->userptr.initial_bind) {
+ if (!userptr->initial_bind) {
up_write(&vm->userptr.notifier_lock);
return true;
}
@@ -663,7 +662,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
if (!xe_vm_in_fault_mode(vm) &&
!(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&vma->userptr.invalidate_link,
+ list_move_tail(&userptr->invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
}
@@ -703,7 +702,7 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
int xe_vm_userptr_pin(struct xe_vm *vm)
{
- struct xe_vma *vma, *next;
+ struct xe_userptr_vma *uvma, *next;
int err = 0;
LIST_HEAD(tmp_evict);
@@ -711,22 +710,23 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
- list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
+ list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
- list_del_init(&vma->userptr.invalidate_link);
- list_move_tail(&vma->combined_links.userptr,
+ list_del_init(&uvma->userptr.invalidate_link);
+ list_move_tail(&uvma->userptr.repin_link,
&vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
/* Pin and move to temporary list */
- list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
- combined_links.userptr) {
- err = xe_vma_userptr_pin_pages(vma);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ err = xe_vma_userptr_pin_pages(uvma);
if (err < 0)
return err;
- list_move_tail(&vma->combined_links.userptr, &vm->rebind_list);
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
}
return 0;
@@ -782,6 +782,14 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
return fence;
}
+static void xe_vma_free(struct xe_vma *vma)
+{
+ if (xe_vma_is_userptr(vma))
+ kfree(to_userptr_vma(vma));
+ else
+ kfree(vma);
+}
+
#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
#define VMA_CREATE_FLAG_IS_NULL BIT(1)
@@ -800,14 +808,26 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
- if (!bo && !is_null) /* userptr */
+ /*
+	 * Allocate and ensure that what xe_vma_is_userptr() returns
+	 * matches what was allocated.
+ */
+ if (!bo && !is_null) {
+ struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
+
+ if (!uvma)
+ return ERR_PTR(-ENOMEM);
+
+ vma = &uvma->vma;
+ } else {
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- else
- vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
- GFP_KERNEL);
- if (!vma) {
- vma = ERR_PTR(-ENOMEM);
- return vma;
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_null)
+ vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+ if (bo)
+ vma->gpuva.gem.obj = &bo->ttm.base;
}
INIT_LIST_HEAD(&vma->combined_links.rebind);
@@ -818,8 +838,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.va.range = end - start + 1;
if (read_only)
vma->gpuva.flags |= XE_VMA_READ_ONLY;
- if (is_null)
- vma->gpuva.flags |= DRM_GPUVA_SPARSE;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -836,35 +854,35 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
if (IS_ERR(vm_bo)) {
- kfree(vma);
+ xe_vma_free(vma);
return ERR_CAST(vm_bo);
}
drm_gpuvm_bo_extobj_add(vm_bo);
drm_gem_object_get(&bo->ttm.base);
- vma->gpuva.gem.obj = &bo->ttm.base;
vma->gpuva.gem.offset = bo_offset_or_userptr;
drm_gpuva_link(&vma->gpuva, vm_bo);
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
if (!is_null) {
+ struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
u64 size = end - start + 1;
int err;
- INIT_LIST_HEAD(&vma->userptr.invalidate_link);
+ INIT_LIST_HEAD(&userptr->invalidate_link);
+ INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
- err = mmu_interval_notifier_insert(&vma->userptr.notifier,
+ err = mmu_interval_notifier_insert(&userptr->notifier,
current->mm,
xe_vma_userptr(vma), size,
&vma_userptr_notifier_ops);
if (err) {
- kfree(vma);
- vma = ERR_PTR(err);
- return vma;
+ xe_vma_free(vma);
+ return ERR_PTR(err);
}
- vma->userptr.notifier_seq = LONG_MAX;
+ userptr->notifier_seq = LONG_MAX;
}
xe_vm_get(vm);
@@ -879,14 +897,21 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
struct xe_device *xe = vm->xe;
bool read_only = xe_vma_read_only(vma);
+ if (vma->ufence) {
+ xe_sync_ufence_put(vma->ufence);
+ vma->ufence = NULL;
+ }
+
if (xe_vma_is_userptr(vma)) {
- if (vma->userptr.sg) {
+ struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+
+ if (userptr->sg) {
dma_unmap_sgtable(xe->drm.dev,
- vma->userptr.sg,
+ userptr->sg,
read_only ? DMA_TO_DEVICE :
DMA_BIDIRECTIONAL, 0);
- sg_free_table(vma->userptr.sg);
- vma->userptr.sg = NULL;
+ sg_free_table(userptr->sg);
+ userptr->sg = NULL;
}
/*
@@ -894,7 +919,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
	 * the notifier until we're sure the GPU is not accessing
* them anymore
*/
- mmu_interval_notifier_remove(&vma->userptr.notifier);
+ mmu_interval_notifier_remove(&userptr->notifier);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma)) {
xe_vm_put(vm);
@@ -902,7 +927,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
xe_bo_put(xe_vma_bo(vma));
}
- kfree(vma);
+ xe_vma_free(vma);
}
static void vma_destroy_work_func(struct work_struct *w)
@@ -933,7 +958,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
- list_del(&vma->userptr.invalidate_link);
+ list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
} else if (!xe_vma_is_null(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
@@ -975,9 +1000,16 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
int err;
XE_WARN_ON(!vm);
- err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
- if (!err && bo && !bo->vm)
- err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
+ else
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm) {
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+ else
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
+ }
return err;
}
@@ -1581,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
trace_xe_vma_unbind(vma);
+ if (vma->ufence) {
+ struct xe_user_fence * const f = vma->ufence;
+
+ if (!xe_sync_ufence_get_status(f))
+ return ERR_PTR(-EBUSY);
+
+ vma->ufence = NULL;
+ xe_sync_ufence_put(f);
+ }
+
if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
@@ -1714,6 +1756,21 @@ err_fences:
return ERR_PTR(err);
}
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_syncs; i++) {
+ struct xe_sync_entry *e = &syncs[i];
+
+ if (xe_sync_is_ufence(e))
+ return xe_sync_ufence_get(e);
+ }
+
+ return NULL;
+}
+
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool immediate, bool first_op,
@@ -1721,9 +1778,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
{
struct dma_fence *fence;
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ struct xe_user_fence *ufence;
xe_vm_assert_held(vm);
+ ufence = find_ufence_get(syncs, num_syncs);
+ if (vma->ufence && ufence)
+ xe_sync_ufence_put(vma->ufence);
+
+ vma->ufence = ufence ?: vma->ufence;
+
if (immediate) {
fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
last_op);
@@ -1855,10 +1919,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
mutex_lock(&xef->vm.lock);
err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->vm.lock);
- if (err) {
- xe_vm_close_and_put(vm);
- return err;
- }
+ if (err)
+ goto err_close_and_put;
if (xe->info.has_asid) {
mutex_lock(&xe->usm.lock);
@@ -1866,11 +1928,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
XA_LIMIT(1, XE_MAX_ASID - 1),
&xe->usm.next_asid, GFP_KERNEL);
mutex_unlock(&xe->usm.lock);
- if (err < 0) {
- xe_vm_close_and_put(vm);
- return err;
- }
- err = 0;
+ if (err < 0)
+ goto err_free_id;
+
vm->usm.asid = asid;
}
@@ -1888,6 +1948,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
#endif
return 0;
+
+err_free_id:
+ mutex_lock(&xef->vm.lock);
+ xa_erase(&xef->vm.xa, id);
+ mutex_unlock(&xef->vm.lock);
+err_close_and_put:
+ xe_vm_close_and_put(vm);
+
+ return err;
}
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1954,6 +2023,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
xe_exec_queue_last_fence_get(wait_exec_queue, vm);
xe_sync_entry_signal(&syncs[i], NULL, fence);
+ dma_fence_put(fence);
}
}
@@ -2034,7 +2104,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
- struct xe_vma_op *op;
struct drm_gpuvm_bo *vm_bo;
int err;
@@ -2081,23 +2150,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
if (IS_ERR(ops))
return ops;
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- if (operation & FORCE_ASYNC_OP_ERROR) {
- op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
- base.entry);
- if (op)
- op->inject_error = true;
- }
-#endif
-
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.immediate =
- flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2145,7 +2201,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
drm_exec_fini(&exec);
if (xe_vma_is_userptr(vma)) {
- err = xe_vma_userptr_pin_pages(vma);
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
if (err) {
prep_vma_destroy(vm, vma, false);
xe_vma_destroy_unlocked(vma);
@@ -2167,13 +2223,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
if (vma->gpuva.flags & XE_VMA_PTE_1G)
return SZ_1G;
- else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+ else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
return SZ_2M;
+ else if (vma->gpuva.flags & XE_VMA_PTE_64K)
+ return SZ_64K;
+ else if (vma->gpuva.flags & XE_VMA_PTE_4K)
+ return SZ_4K;
- return SZ_4K;
+	return SZ_1G;	/* Uninitialized, use max size */
}
-static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
switch (size) {
case SZ_1G:
@@ -2182,9 +2242,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
case SZ_2M:
vma->gpuva.flags |= XE_VMA_PTE_2M;
break;
+ case SZ_64K:
+ vma->gpuva.flags |= XE_VMA_PTE_64K;
+ break;
+ case SZ_4K:
+ vma->gpuva.flags |= XE_VMA_PTE_4K;
+ break;
}
-
- return SZ_4K;
}
static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
@@ -2282,8 +2346,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
@@ -2414,7 +2476,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
- op->map.immediate || !xe_vm_in_fault_mode(vm),
+ !xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2500,13 +2562,25 @@ retry_userptr:
}
drm_exec_fini(&exec);
- if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
+ if (err == -EAGAIN) {
lockdep_assert_held_write(&vm->lock);
- err = xe_vma_userptr_pin_pages(vma);
- if (!err)
- goto retry_userptr;
- trace_xe_vma_fail(vma);
+ if (op->base.op == DRM_GPUVA_OP_REMAP) {
+ if (!op->remap.unmap_done)
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ else if (op->remap.prev)
+ vma = op->remap.prev;
+ else
+ vma = op->remap.next;
+ }
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (!err)
+ goto retry_userptr;
+
+ trace_xe_vma_fail(vma);
+ }
}
return err;
@@ -2518,13 +2592,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
lockdep_assert_held_write(&vm->lock);
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- if (op->inject_error) {
- op->inject_error = false;
- return -ENOMEM;
- }
-#endif
-
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
ret = __xe_vma_op_execute(vm, op->map.vma, op);
@@ -2639,7 +2706,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
{
int i;
- for (i = num_ops_list - 1; i; ++i) {
+ for (i = num_ops_list - 1; i >= 0; --i) {
struct drm_gpuva_ops *__ops = ops[i];
struct drm_gpuva_op *__op;
@@ -2684,21 +2751,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
-#ifdef TEST_VM_ASYNC_OPS_ERROR
-#define SUPPORTED_FLAGS \
- (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
-#else
-#define SUPPORTED_FLAGS \
- (DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
- 0xffff)
-#endif
+#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
+ DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
-#define MAX_BINDS 512 /* FIXME: Picking random upper limit */
-
static int vm_bind_ioctl_check_args(struct xe_device *xe,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
@@ -2710,16 +2767,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions) ||
- XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
+ if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
if (args->num_binds > 1) {
u64 __user *bind_user =
u64_to_user_ptr(args->vector_of_binds);
- *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds, GFP_KERNEL);
+ *bind_ops = kvmalloc_array(args->num_binds,
+ sizeof(struct drm_xe_vm_bind_op),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
return -ENOMEM;
@@ -2809,7 +2866,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
free_bind_ops:
if (args->num_binds > 1)
- kfree(*bind_ops);
+ kvfree(*bind_ops);
return err;
}
@@ -2846,7 +2903,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_gpuva_ops **ops = NULL;
struct xe_vm *vm;
struct xe_exec_queue *q = NULL;
- u32 num_syncs;
+ u32 num_syncs, num_ufence = 0;
struct xe_sync_entry *syncs = NULL;
struct drm_xe_vm_bind_op *bind_ops;
LIST_HEAD(ops_list);
@@ -2897,13 +2954,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (args->num_binds) {
- bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
+ bos = kvcalloc(args->num_binds, sizeof(*bos),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
- ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
+ ops = kvcalloc(args->num_binds, sizeof(*ops),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -2983,6 +3042,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
if (err)
goto free_syncs;
+
+ if (xe_sync_is_ufence(&syncs[num_syncs]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
+ err = -EINVAL;
+ goto free_syncs;
}
if (!args->num_binds) {
@@ -3036,10 +3103,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; bos && i < args->num_binds; ++i)
xe_bo_put(bos[i]);
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
@@ -3063,10 +3130,10 @@ put_exec_queue:
if (q)
xe_exec_queue_put(q);
free_objs:
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
}
@@ -3125,8 +3192,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
WARN_ON_ONCE(!mmu_interval_check_retry
- (&vma->userptr.notifier,
- vma->userptr.notifier_seq));
+ (&to_userptr_vma(vma)->userptr.notifier,
+ to_userptr_vma(vma)->userptr.notifier_seq));
WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
DMA_RESV_USAGE_BOOKKEEP));
@@ -3187,11 +3254,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
if (is_null) {
addr = 0;
} else if (is_userptr) {
+ struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
struct xe_res_cursor cur;
- if (vma->userptr.sg) {
- xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
- &cur);
+ if (sg) {
+ xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
addr = xe_res_dma(&cur);
} else {
addr = 0;
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index cf2f96e8c1ab..9654a0612fc2 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -160,6 +160,18 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}
+/**
+ * to_userptr_vma() - Return a pointer to an embedding userptr vma
+ * @vma: Pointer to the embedded struct xe_vma
+ *
+ * Return: Pointer to the embedding userptr vma
+ */
+static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
+{
+ xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
+ return container_of(vma, struct xe_userptr_vma, vma);
+}
+
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -224,9 +236,9 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
}
}
-int xe_vma_userptr_pin_pages(struct xe_vma *vma);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
-int xe_vma_userptr_check_repin(struct xe_vma *vma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
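
to_userptr_vma() is the usual container_of() subclassing idiom; a self-contained sketch of the same pattern with hypothetical names (and a local container_of() built on offsetof for illustration):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base { int id; };

	struct derived {
		struct base base;	/* must be embedded by value, not a pointer */
		int extra;
	};

	static struct derived *to_derived(struct base *b)
	{
		/* only valid when b is known to live inside a struct derived */
		return container_of(b, struct derived, base);
	}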
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 63e8a50b88e9..7300eea5394b 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -19,11 +19,9 @@
struct xe_bo;
struct xe_sync_entry;
+struct xe_user_fence;
struct xe_vm;
-#define TEST_VM_ASYNC_OPS_ERROR
-#define FORCE_ASYNC_OP_ERROR BIT(31)
-
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
@@ -32,11 +30,15 @@ struct xe_vm;
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
/** struct xe_userptr - User pointer */
struct xe_userptr {
/** @invalidate_link: Link for the vm::userptr.invalidated list */
struct list_head invalidate_link;
+	/** @repin_link: link into VM repin list if userptr. */
+ struct list_head repin_link;
/**
* @notifier: MMU notifier for user pointer (invalidation call back)
*/
@@ -68,8 +70,6 @@ struct xe_vma {
* resv.
*/
union {
- /** @userptr: link into VM repin list if userptr. */
- struct list_head userptr;
/** @rebind: link into VM if this VMA needs rebinding. */
struct list_head rebind;
/** @destroy: link to contested list when VM is being closed. */
@@ -107,9 +107,19 @@ struct xe_vma {
u16 pat_index;
/**
- * @userptr: user pointer state, only allocated for VMAs that are
- * user pointers
+ * @ufence: The user fence that was provided with MAP.
+ * Needs to be signalled before UNMAP can be processed.
*/
+ struct xe_user_fence *ufence;
+};
+
+/**
+ * struct xe_userptr_vma - A userptr vma subclass
+ * @vma: The vma.
+ * @userptr: Additional userptr information.
+ */
+struct xe_userptr_vma {
+ struct xe_vma vma;
struct xe_userptr userptr;
};
@@ -285,10 +295,6 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
- /** @immediate: Immediate bind */
- bool immediate;
- /** @read_only: Read only */
- bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @pat_index: The pat index to use for this operation. */
@@ -356,11 +362,6 @@ struct xe_vma_op {
/** @flags: operation flags */
enum xe_vma_op_flags flags;
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- /** @inject_error: inject error to test async op error handling */
- bool inject_error;
-#endif
-
union {
/** @map: VMA map operation specific data */
struct xe_vma_op_map map;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 42fd504abbcd..89983d7d73ca 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = {
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
.reserve_vblank_syncpts = false,
+ .skip_reset_assert = true,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev)
host1x_intr_stop(host);
host1x_syncpt_save(host);
- err = reset_control_bulk_assert(host->nresets, host->resets);
- if (err) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- goto resume_host1x;
- }
+ if (!host->info->skip_reset_assert) {
+ err = reset_control_bulk_assert(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ goto resume_host1x;
+ }
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
+ }
clk_disable_unprepare(host->clk);
reset_control_bulk_release(host->nresets, host->resets);
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index c8e302de7625..925a118db23f 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -116,6 +116,12 @@ struct host1x_info {
* the display driver disables VBLANK increments.
*/
bool reserve_vblank_syncpts;
+ /*
+ * On Tegra186, secure world applications may require access to
+	 * host1x during suspend/resume. To allow this, we need to leave
+	 * host1x out of reset.
+ */
+ bool skip_reset_assert;
};
struct host1x {
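
The runtime-suspend hunk above keys off this flag; the same pattern reduced to a hypothetical, self-contained form (soc_info, assert_resets() and disable_clocks() are placeholders, not host1x API):

	struct soc_info {
		/* leave the block out of reset across suspend, e.g. for
		 * secure-world users that touch it while the kernel sleeps
		 */
		bool skip_reset_assert;
	};

	static int soc_runtime_suspend(const struct soc_info *info)
	{
		if (!info->skip_reset_assert) {
			assert_resets();		/* stands in for reset_control_bulk_assert() */
			usleep_range(1000, 2000);	/* let the resets propagate */
		}
		disable_clocks();			/* clocks are gated either way */
		return 0;
	}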
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index d9ef45fcaeab..470ae2c29c94 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -143,6 +143,9 @@ u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *s
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
/**
* hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
*
@@ -152,7 +155,7 @@ EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
*
* @returns %NULL on error, an %__u8 memory pointer on success
*/
-noinline __u8 *
+__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
struct hid_bpf_ctx_kern *ctx_kern;
@@ -167,6 +170,7 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr
return ctx_kern->data + offset;
}
+__bpf_kfunc_end_defs();
/*
* The following set contains all functions we agree BPF programs
@@ -241,6 +245,42 @@ int hid_bpf_reconnect(struct hid_device *hdev)
return 0;
}
+static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
+ __u32 flags)
+{
+ int fd, err, prog_type;
+
+ prog_type = hid_bpf_get_prog_attach_type(prog);
+ if (prog_type < 0)
+ return prog_type;
+
+ if (prog_type >= HID_BPF_PROG_TYPE_MAX)
+ return -EINVAL;
+
+ if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
+ err = hid_bpf_allocate_event_data(hdev);
+ if (err)
+ return err;
+ }
+
+ fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
+ if (fd < 0)
+ return fd;
+
+ if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
+ err = hid_bpf_reconnect(hdev);
+ if (err) {
+ close_fd(fd);
+ return err;
+ }
+ }
+
+ return fd;
+}
+
+/* Disables missing prototype warnings */
+__bpf_kfunc_start_defs();
+
/**
* hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
*
@@ -253,22 +293,17 @@ int hid_bpf_reconnect(struct hid_device *hdev)
* is pinned to the BPF file system).
*/
/* called from syscall */
-noinline int
+__bpf_kfunc int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
struct hid_device *hdev;
+ struct bpf_prog *prog;
struct device *dev;
- int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
+ int err, fd;
if (!hid_bpf_ops)
return -EINVAL;
- if (prog_type < 0)
- return prog_type;
-
- if (prog_type >= HID_BPF_PROG_TYPE_MAX)
- return -EINVAL;
-
if ((flags & ~HID_BPF_FLAG_MASK))
return -EINVAL;
@@ -278,25 +313,29 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
hdev = to_hid_device(dev);
- if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
- err = hid_bpf_allocate_event_data(hdev);
- if (err)
- return err;
+ /*
+	 * take a ref on the prog itself; it will be released
+	 * on error or when the prog is detached
+ */
+ prog = bpf_prog_get(prog_fd);
+ if (IS_ERR(prog)) {
+ err = PTR_ERR(prog);
+ goto out_dev_put;
}
- fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
- if (fd < 0)
- return fd;
-
- if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
- err = hid_bpf_reconnect(hdev);
- if (err) {
- close_fd(fd);
- return err;
- }
+ fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
+ if (fd < 0) {
+ err = fd;
+ goto out_prog_put;
}
return fd;
+
+ out_prog_put:
+ bpf_prog_put(prog);
+ out_dev_put:
+ put_device(dev);
+ return err;
}
/**
@@ -306,7 +345,7 @@ hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
*
* @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
*/
-noinline struct hid_bpf_ctx *
+__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
struct hid_device *hdev;
@@ -323,8 +362,10 @@ hid_bpf_allocate_context(unsigned int hid_id)
hdev = to_hid_device(dev);
ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
- if (!ctx_kern)
+ if (!ctx_kern) {
+ put_device(dev);
return NULL;
+ }
ctx_kern->ctx.hid = hdev;
@@ -337,14 +378,19 @@ hid_bpf_allocate_context(unsigned int hid_id)
* @ctx: the HID-BPF context to release
*
*/
-noinline void
+__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
struct hid_bpf_ctx_kern *ctx_kern;
+ struct hid_device *hid;
ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+ hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */
kfree(ctx_kern);
+
+ /* get_device() is called by bus_find_device() */
+ put_device(&hid->dev);
}
/**
@@ -358,7 +404,7 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
*
* @returns %0 on success, a negative error code otherwise.
*/
-noinline int
+__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
enum hid_report_type rtype, enum hid_class_request reqtype)
{
@@ -426,6 +472,7 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
kfree(dma_data);
return ret;
}
+__bpf_kfunc_end_defs();
/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
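
The __bpf_kfunc conversions above all follow one shape; a minimal sketch of exposing a single kfunc, with a hypothetical function and context type:

	/* Disables missing prototype warnings */
	__bpf_kfunc_start_defs();

	/* callable from BPF programs once its BTF ID is registered */
	__bpf_kfunc int my_kfunc(struct my_ctx *ctx)
	{
		return 0;
	}

	__bpf_kfunc_end_defs();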
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.h b/drivers/hid/bpf/hid_bpf_dispatch.h
index 63dfc8605cd2..fbe0639d09f2 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.h
+++ b/drivers/hid/bpf/hid_bpf_dispatch.h
@@ -12,9 +12,9 @@ struct hid_bpf_ctx_kern {
int hid_bpf_preload_skel(void);
void hid_bpf_free_links_and_skel(void);
-int hid_bpf_get_prog_attach_type(int prog_fd);
+int hid_bpf_get_prog_attach_type(struct bpf_prog *prog);
int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd,
- __u32 flags);
+ struct bpf_prog *prog, __u32 flags);
void __hid_bpf_destroy_device(struct hid_device *hdev);
int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
struct hid_bpf_ctx_kern *ctx_kern);
diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c
index eca34b7372f9..aa8e1c79cdf5 100644
--- a/drivers/hid/bpf/hid_bpf_jmp_table.c
+++ b/drivers/hid/bpf/hid_bpf_jmp_table.c
@@ -196,6 +196,7 @@ static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
static void hid_bpf_release_progs(struct work_struct *work)
{
int i, j, n, map_fd = -1;
+ bool hdev_destroyed;
if (!jmp_table.map)
return;
@@ -220,6 +221,12 @@ static void hid_bpf_release_progs(struct work_struct *work)
if (entry->hdev) {
hdev = entry->hdev;
type = entry->type;
+ /*
+ * hdev is still valid, even if we are called after hid_destroy_device():
+			 * when hid_bpf_attach_prog() gets called, it takes a ref
+			 * on the dev through bus_find_device()
+ */
+ hdev_destroyed = hdev->bpf.destroyed;
hid_bpf_populate_hdev(hdev, type);
@@ -232,12 +239,19 @@ static void hid_bpf_release_progs(struct work_struct *work)
if (test_bit(next->idx, jmp_table.enabled))
continue;
- if (next->hdev == hdev && next->type == type)
+ if (next->hdev == hdev && next->type == type) {
+ /*
+ * clear the hdev reference and decrement the device ref
+				 * that was taken during bus_find_device() while
+				 * calling hid_bpf_attach_prog()
+ */
next->hdev = NULL;
+ put_device(&hdev->dev);
+ }
}
- /* if type was rdesc fixup, reconnect device */
- if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP)
+ /* if type was rdesc fixup and the device is not gone, reconnect device */
+ if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP && !hdev_destroyed)
hid_bpf_reconnect(hdev);
}
}
@@ -333,15 +347,10 @@ static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
return err;
}
-int hid_bpf_get_prog_attach_type(int prog_fd)
+int hid_bpf_get_prog_attach_type(struct bpf_prog *prog)
{
- struct bpf_prog *prog = NULL;
- int i;
int prog_type = HID_BPF_PROG_TYPE_UNDEF;
-
- prog = bpf_prog_get(prog_fd);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
+ int i;
for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
@@ -350,8 +359,6 @@ int hid_bpf_get_prog_attach_type(int prog_fd)
}
}
- bpf_prog_put(prog);
-
return prog_type;
}
@@ -388,19 +395,13 @@ static const struct bpf_link_ops hid_bpf_link_lops = {
/* called from syscall */
noinline int
__hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
- int prog_fd, __u32 flags)
+ int prog_fd, struct bpf_prog *prog, __u32 flags)
{
struct bpf_link_primer link_primer;
struct hid_bpf_link *link;
- struct bpf_prog *prog = NULL;
struct hid_bpf_prog_entry *prog_entry;
int cnt, err = -EINVAL, prog_table_idx = -1;
- /* take a ref on the prog itself */
- prog = bpf_prog_get(prog_fd);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
-
mutex_lock(&hid_bpf_attach_lock);
link = kzalloc(sizeof(*link), GFP_USER);
@@ -467,7 +468,6 @@ __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
err_unlock:
mutex_unlock(&hid_bpf_attach_lock);
- bpf_prog_put(prog);
kfree(link);
return err;
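
After this refactor the reference travels with the outermost caller; in sketch form, with do_attach() standing in for do_hid_bpf_attach_prog():

	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = do_attach(hdev, prog_fd, prog, flags);	/* borrows prog */
	if (fd < 0) {
		bpf_prog_put(prog);	/* on failure the ref is still ours */
		return fd;
	}

	return fd;	/* on success the attached entry holds the ref until detach */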
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index fb30e228d35f..828a5c022c64 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -298,6 +298,9 @@
#define USB_VENDOR_ID_CIDC 0x1677
+#define I2C_VENDOR_ID_CIRQUE 0x0488
+#define I2C_PRODUCT_ID_CIRQUE_1063 0x1063
+
#define USB_VENDOR_ID_CJTOUCH 0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index fd6d8f1d9b8f..d2f3f234f29d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -203,6 +203,8 @@ struct hidpp_device {
struct hidpp_scroll_counter vertical_wheel_counter;
u8 wireless_feature_index;
+
+ bool connected_once;
};
/* HID++ 1.0 error codes */
@@ -988,8 +990,13 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
hidpp->protocol_minor = response.rap.params[1];
print_version:
- hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
+ if (!hidpp->connected_once) {
+ hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ hidpp->connected_once = true;
+	} else {
+		hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+			hidpp->protocol_major, hidpp->protocol_minor);
+	}
return 0;
}
@@ -4184,7 +4191,7 @@ static void hidpp_connect_event(struct work_struct *work)
/* Get device version to check if it is connected */
ret = hidpp_root_get_protocol_version(hidpp);
if (ret) {
- hid_info(hidpp->hid_dev, "Disconnected\n");
+ hid_dbg(hidpp->hid_dev, "Disconnected\n");
if (hidpp->battery.ps) {
hidpp->battery.online = false;
hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN;
@@ -4610,6 +4617,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
{ /* Logitech G Pro X Superlight Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
+ { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
{ /* G935 Gaming Headset */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fd5b0637dad6..3e91e4d6ba6f 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
+
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
index 82d0a77359c4..58b15750dbb0 100644
--- a/drivers/hid/hid-nvidia-shield.c
+++ b/drivers/hid/hid-nvidia-shield.c
@@ -800,6 +800,8 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
"thunderstrike%d:blue:led", ts->id);
+ if (!led->name)
+ return -ENOMEM;
led->max_brightness = 1;
led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN;
led->brightness_get = &thunderstrike_led_get_brightness;
@@ -831,6 +833,8 @@ static inline int thunderstrike_psy_create(struct shield_device *shield_dev)
shield_dev->battery_dev.desc.name =
devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
"thunderstrike_%d", ts->id);
+ if (!shield_dev->battery_dev.desc.name)
+ return -ENOMEM;
shield_dev->battery_dev.psy = power_supply_register(
&hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg);
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index b3c4e50e248a..b08a5ab58528 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -1109,10 +1109,9 @@ static int steam_probe(struct hid_device *hdev,
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
steam = devm_kzalloc(&hdev->dev, sizeof(*steam), GFP_KERNEL);
- if (!steam) {
- ret = -ENOMEM;
- goto steam_alloc_fail;
- }
+ if (!steam)
+ return -ENOMEM;
+
steam->hdev = hdev;
hid_set_drvdata(hdev, steam);
spin_lock_init(&steam->lock);
@@ -1129,14 +1128,14 @@ static int steam_probe(struct hid_device *hdev,
*/
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDRAW);
if (ret)
- goto hid_hw_start_fail;
+ goto err_cancel_work;
ret = hid_hw_open(hdev);
if (ret) {
hid_err(hdev,
"%s:hid_hw_open\n",
__func__);
- goto hid_hw_open_fail;
+ goto err_hw_stop;
}
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
@@ -1152,36 +1151,37 @@ static int steam_probe(struct hid_device *hdev,
hid_err(hdev,
"%s:steam_register failed with error %d\n",
__func__, ret);
- goto input_register_fail;
+ goto err_hw_close;
}
}
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {
ret = PTR_ERR(steam->client_hdev);
- goto client_hdev_fail;
+ goto err_stream_unregister;
}
steam->client_hdev->driver_data = steam;
ret = hid_add_device(steam->client_hdev);
if (ret)
- goto client_hdev_add_fail;
+ goto err_destroy;
return 0;
-client_hdev_add_fail:
- hid_hw_stop(hdev);
-client_hdev_fail:
+err_destroy:
hid_destroy_device(steam->client_hdev);
-input_register_fail:
-hid_hw_open_fail:
-hid_hw_start_fail:
+err_stream_unregister:
+ if (steam->connected)
+ steam_unregister(steam);
+err_hw_close:
+ hid_hw_close(hdev);
+err_hw_stop:
+ hid_hw_stop(hdev);
+err_cancel_work:
cancel_work_sync(&steam->work_connect);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->rumble_work);
-steam_alloc_fail:
- hid_err(hdev, "%s: failed with error %d\n",
- __func__, ret);
+
return ret;
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 13c8dd8cd350..2bc762d31ac7 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -357,8 +357,11 @@ static int hidraw_release(struct inode * inode, struct file * file)
down_write(&minors_rwsem);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
- for (int i = list->tail; i < list->head; i++)
- kfree(list->buffer[i].value);
+ while (list->tail != list->head) {
+ kfree(list->buffer[list->tail].value);
+ list->buffer[list->tail].value = NULL;
+ list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
+ }
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
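
The new loop drains the ring exactly the way the read path consumes it; a standalone sketch of the masked-index walk, assuming a power-of-two ring size as HIDRAW_BUFFER_SIZE is:

	#include <stdlib.h>

	#define RING_SIZE 64	/* power of two, so (i + 1) & (RING_SIZE - 1) wraps */

	struct entry { void *value; };

	static void drain(struct entry *buf, unsigned int *tail, unsigned int head)
	{
		while (*tail != head) {
			free(buf[*tail].value);
			buf[*tail].value = NULL;
			*tail = (*tail + 1) & (RING_SIZE - 1);
		}
	}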
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90f316ae9819..2df1ab3c31cc 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -49,6 +49,7 @@
#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(2)
#define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(3)
#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(4)
+#define I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND BIT(5)
/* Command opcodes */
#define I2C_HID_OPCODE_RESET 0x01
@@ -131,6 +132,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_RESET_ON_RESUME },
{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
I2C_HID_QUIRK_BAD_INPUT_SIZE },
+ { I2C_VENDOR_ID_CIRQUE, I2C_PRODUCT_ID_CIRQUE_1063,
+ I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND },
/*
 * Sending the wakeup after reset actually breaks the ELAN touchscreen controller
*/
@@ -956,7 +959,8 @@ static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
return ret;
/* Save some power */
- i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_SLEEP_ON_SUSPEND))
+ i2c_hid_set_power(ihid, I2C_HID_PWR_SLEEP);
disable_irq(client->irq);
diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c
index c4e1fa0273c8..8be4d576da77 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of.c
@@ -87,6 +87,7 @@ static int i2c_hid_of_probe(struct i2c_client *client)
if (!ihid_of)
return -ENOMEM;
+ ihid_of->client = client;
ihid_of->ops.power_up = i2c_hid_of_power_up;
ihid_of->ops.power_down = i2c_hid_of_power_down;
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index aa6cb033bb06..03d5601ce807 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -722,6 +722,8 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
cl->state = ISHTP_CL_DISCONNECTED;
+ if (warm_reset && cl->device->reference_count)
+ continue;
/*
* Wake any pending process. The waiter would check dev->state
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 82c907f01bd3..8a7f2f6a4f86 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -49,7 +49,9 @@ static void ishtp_read_list_flush(struct ishtp_cl *cl)
list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
list_del(&rb->list);
- ishtp_io_rb_free(rb);
+ spin_lock(&cl->free_list_spinlock);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock(&cl->free_list_spinlock);
}
spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b613f11ed949..2bc45b24075c 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2087,7 +2087,7 @@ static int wacom_allocate_inputs(struct wacom *wacom)
return 0;
}
-static int wacom_register_inputs(struct wacom *wacom)
+static int wacom_setup_inputs(struct wacom *wacom)
{
struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
@@ -2106,10 +2106,6 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(pen_input_dev);
wacom_wac->pen_input = NULL;
pen_input_dev = NULL;
- } else {
- error = input_register_device(pen_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
@@ -2118,10 +2114,6 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(touch_input_dev);
wacom_wac->touch_input = NULL;
touch_input_dev = NULL;
- } else {
- error = input_register_device(touch_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
@@ -2130,7 +2122,34 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(pad_input_dev);
wacom_wac->pad_input = NULL;
pad_input_dev = NULL;
- } else {
+ }
+
+ return 0;
+}
+
+static int wacom_register_inputs(struct wacom *wacom)
+{
+ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int error = 0;
+
+ pen_input_dev = wacom_wac->pen_input;
+ touch_input_dev = wacom_wac->touch_input;
+ pad_input_dev = wacom_wac->pad_input;
+
+ if (pen_input_dev) {
+ error = input_register_device(pen_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (touch_input_dev) {
+ error = input_register_device(touch_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (pad_input_dev) {
error = input_register_device(pad_input_dev);
if (error)
goto fail;
@@ -2383,6 +2402,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_setup_inputs(wacom);
+ if (error)
+ goto fail;
+
+ if (features->type == HID_GENERIC)
+ connect_mask |= HID_CONNECT_DRIVER;
+
+ /* Regular HID work starts now */
+ error = hid_hw_start(hdev, connect_mask);
+ if (error) {
+ hid_err(hdev, "hw start failed\n");
+ goto fail;
+ }
+
error = wacom_register_inputs(wacom);
if (error)
goto fail;
@@ -2397,16 +2430,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail;
}
- if (features->type == HID_GENERIC)
- connect_mask |= HID_CONNECT_DRIVER;
-
- /* Regular HID work starts now */
- error = hid_hw_start(hdev, connect_mask);
- if (error) {
- hid_err(hdev, "hw start failed\n");
- goto fail;
- }
-
if (!wireless) {
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(wacom);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index da8a01fedd39..fbe10fbc5769 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2575,7 +2575,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], sense);
if (wacom_wac->serial[0]) {
- input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ /*
+ * xf86-input-wacom does not accept a serial number
+ * of '0'. Report the low 32 bits if possible, but
+ * if they are zero, report the upper ones instead.
+ */
+ __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu;
+ __u32 serial_hi = wacom_wac->serial[0] >> 32;
+ input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi));
input_report_abs(input, ABS_MISC, sense ? id : 0);
}
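
The serial selection pulled out as a tiny pure function for clarity (a hypothetical helper, not part of the patch):

	#include <stdint.h>

	/* pick a nonzero 32-bit chunk of a 64-bit serial, low half preferred */
	static uint32_t pick_serial(uint64_t serial)
	{
		uint32_t lo = (uint32_t)(serial & 0xFFFFFFFFu);
		uint32_t hi = (uint32_t)(serial >> 32);

		return lo ? lo : hi;
	}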
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 56f7e06c673e..adbf674355b2 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -322,125 +322,89 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
- /* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
sizeof(struct vmbus_channel_gpadl_header) -
sizeof(struct gpa_range);
+ pfncount = umin(pagecount, pfnsize / sizeof(u64));
+
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pfncount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgheader)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+ for (i = 0; i < pfncount; i++)
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
+ *msginfo = msgheader;
+
+ pfnsum = pfncount;
+ pfnleft = pagecount - pfncount;
+
+ /* how many pfns can we fit in a body message */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_body);
pfncount = pfnsize / sizeof(u64);
- if (pagecount > pfncount) {
- /* we need a gpadl body */
- /* fill in the header */
+ /*
+ * If pfnleft is zero, everything fits in the header and no body
+ * messages are needed
+ */
+ while (pfnleft) {
+ pfncurr = umin(pfncount, pfnleft);
msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pfncount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (!msgheader)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
- *msginfo = msgheader;
-
- pfnsum = pfncount;
- pfnleft = pagecount - pfncount;
-
- /* how many pfns can we fit */
- pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
- sizeof(struct vmbus_channel_gpadl_body);
- pfncount = pfnsize / sizeof(u64);
-
- /* fill in the body */
- while (pfnleft) {
- if (pfnleft > pfncount)
- pfncurr = pfncount;
- else
- pfncurr = pfnleft;
-
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_body) +
- pfncurr * sizeof(u64);
- msgbody = kzalloc(msgsize, GFP_KERNEL);
-
- if (!msgbody) {
- struct vmbus_channel_msginfo *pos = NULL;
- struct vmbus_channel_msginfo *tmp = NULL;
- /*
- * Free up all the allocated messages.
- */
- list_for_each_entry_safe(pos, tmp,
- &msgheader->submsglist,
- msglistentry) {
-
- list_del(&pos->msglistentry);
- kfree(pos);
- }
-
- goto nomem;
- }
-
- msgbody->msgsize = msgsize;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)msgbody->msg;
+ sizeof(struct vmbus_channel_gpadl_body) +
+ pfncurr * sizeof(u64);
+ msgbody = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
/*
- * Gpadl is u32 and we are using a pointer which could
- * be 64-bit
- * This is governed by the guest/host protocol and
- * so the hypervisor guarantees that this is ok.
+ * Free up all the allocated messages.
*/
- for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
- kbuffer, size, send_offset, pfnsum + i);
-
- /* add to msg header */
- list_add_tail(&msgbody->msglistentry,
- &msgheader->submsglist);
- pfnsum += pfncurr;
- pfnleft -= pfncurr;
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+ kfree(msgheader);
+ return -ENOMEM;
}
- } else {
- /* everything fits in a header */
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pagecount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (msgheader == NULL)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
-
- *msginfo = msgheader;
+
+ msgbody->msgsize = msgsize;
+ gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+ /*
+	 * Gpadl is u32, and we are using a pointer which could
+	 * be 64-bit. This is governed by the guest/host protocol,
+	 * so the hypervisor guarantees that this is OK.
+ */
+ for (i = 0; i < pfncurr; i++)
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
+
+ /* add to msg header */
+ list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
+ pfnsum += pfncurr;
+ pfnleft -= pfncurr;
}
return 0;
-nomem:
- kfree(msgheader);
- kfree(msgbody);
- return -ENOMEM;
}
/*
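The rewrite above folds the old two-branch logic into a single path: the
header message always carries as many PFNs as fit (the umin() of the page
count and the header capacity), and the while loop emits body messages only
for the remainder. A standalone sketch of the pagination arithmetic, with
made-up overhead sizes (the real limits come from MAX_SIZE_CHANNEL_MESSAGE
and the vmbus structure sizes):

#include <stdio.h>
#include <stdint.h>

#define MSG_LIMIT	240	/* illustrative, not MAX_SIZE_CHANNEL_MESSAGE */
#define HDR_OVERHEAD	40	/* gpadl header + gpa_range, illustrative */
#define BODY_OVERHEAD	16	/* gpadl body, illustrative */

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int pagecount = 100;
	unsigned int hdr_pfns = umin(pagecount, (MSG_LIMIT - HDR_OVERHEAD) / sizeof(uint64_t));
	unsigned int body_pfns = (MSG_LIMIT - BODY_OVERHEAD) / sizeof(uint64_t);
	unsigned int left = pagecount - hdr_pfns, bodies = 0;

	while (left) {		/* zero iterations when everything fit the header */
		left -= umin(body_pfns, left);
		bodies++;
	}
	printf("header carries %u pfns, %u body msgs for the rest\n", hdr_pfns, bodies);
	return 0;
}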
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 42aec2c5606a..9c97c4065fe7 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -296,6 +296,11 @@ static struct {
spinlock_t lock;
} host_ts;
+static bool timesync_implicit;
+
+module_param(timesync_implicit, bool, 0644);
+MODULE_PARM_DESC(timesync_implicit, "If set, treat SAMPLE as SYNC when the clock is behind");
+
static inline u64 reftime_to_ns(u64 reftime)
{
return (reftime - WLTIMEDELTA) * 100;
@@ -345,6 +350,29 @@ static void hv_set_host_time(struct work_struct *work)
}
/*
+ * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume.
+ * Force a sync if the guest is behind.
+ */
+static inline bool hv_implicit_sync(u64 host_time)
+{
+ struct timespec64 new_ts;
+ struct timespec64 threshold_ts;
+
+ new_ts = ns_to_timespec64(reftime_to_ns(host_time));
+ ktime_get_real_ts64(&threshold_ts);
+
+ threshold_ts.tv_sec += 5;
+
+ /*
+	 * Sync if the guest is behind the host by 5 or more seconds.
+ */
+ if (timespec64_compare(&new_ts, &threshold_ts) >= 0)
+ return true;
+
+ return false;
+}
+
+/*
* Synchronize time with host after reboot, restore, etc.
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
@@ -384,7 +412,8 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
spin_unlock_irqrestore(&host_ts.lock, flags);
/* Schedule work to do do_settimeofday64() */
- if (adj_flags & ICTIMESYNCFLAG_SYNC)
+ if ((adj_flags & ICTIMESYNCFLAG_SYNC) ||
+ (timesync_implicit && hv_implicit_sync(host_ts.host_time)))
schedule_work(&adj_time_work);
}
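The new hook only requests a clock set when the host's reference time is at
least 5 seconds ahead of the guest's, so ordinary SAMPLE messages do not
cause constant stepping. A userspace sketch of the threshold comparison,
with illustrative timestamps (struct timespec standing in for the kernel's
timespec64):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* True when 'host' is at least 5 seconds ahead of 'guest'. */
static bool needs_implicit_sync(struct timespec host, struct timespec guest)
{
	guest.tv_sec += 5;	/* threshold, as in hv_implicit_sync() */
	if (host.tv_sec != guest.tv_sec)
		return host.tv_sec > guest.tv_sec;
	return host.tv_nsec >= guest.tv_nsec;
}

int main(void)
{
	struct timespec guest = { .tv_sec = 1000 };
	struct timespec host_close = { .tv_sec = 1004 };
	struct timespec host_far = { .tv_sec = 1005 };

	printf("%d %d\n", needs_implicit_sync(host_close, guest),
	       needs_implicit_sync(host_far, guest));	/* prints: 0 1 */
	return 0;
}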
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b33d5abd9beb..7f7965f3d187 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -988,7 +988,7 @@ static const struct dev_pm_ops vmbus_pm = {
};
/* The one and only one */
-static struct bus_type hv_bus = {
+static const struct bus_type hv_bus = {
.name = "vmbus",
.match = vmbus_match,
.shutdown = vmbus_shutdown,
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index f6e1e55e8292..4acc1858d8ac 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -195,6 +195,8 @@ struct aspeed_pwm_tacho_data {
u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS];
struct aspeed_cooling_device *cdev[8];
const struct attribute_group *groups[3];
+ /* protects access to shared ASPEED_PTCR_RESULT */
+ struct mutex tach_lock;
};
enum type { TYPEM, TYPEN, TYPEO };
@@ -529,6 +531,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
u8 fan_tach_ch_source, type, mode, both;
int ret;
+ mutex_lock(&priv->tach_lock);
+
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
@@ -546,6 +550,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
ASPEED_RPM_STATUS_SLEEP_USEC,
usec);
+ mutex_unlock(&priv->tach_lock);
+
/* return -ETIMEDOUT if we didn't get an answer. */
if (ret)
return ret;
@@ -915,6 +921,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ mutex_init(&priv->tach_lock);
priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index ba82d1e79c13..b8fc8d1ef20d 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
+#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
@@ -419,7 +419,7 @@ static ssize_t show_temp(struct device *dev,
}
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
- int attr_no)
+ int index)
{
int i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -431,13 +431,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
};
for (i = 0; i < tdata->attr_size; i++) {
+ /*
+		 * We map the attr number to the core id of the CPU.
+		 * The attr number is always core id + 2.
+		 * The Pkgtemp will always show up as temp1_*, if available.
+ */
+ int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
+
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
"temp%d_%s", attr_no, suffixes[i]);
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
- tdata->sd_attrs[i].index = attr_no;
+ tdata->sd_attrs[i].index = index;
tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
}
tdata->attr_group.attrs = tdata->attrs;
@@ -495,30 +502,25 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
- int err, index, attr_no;
+ int err, index;
if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
return 0;
/*
- * Find attr number for sysfs:
- * We map the attr number to core id of the CPU
- * The attr number is always core id + 2
- * The Pkgtemp will always show up as temp1_*, if available
+ * Get the index of tdata in pdata->core_data[]
+ * tdata for package: pdata->core_data[1]
+ * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
*/
if (pkg_flag) {
- attr_no = PKG_SYSFS_ATTR_NO;
+ index = PKG_SYSFS_ATTR_NO;
} else {
- index = ida_alloc(&pdata->ida, GFP_KERNEL);
+ index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
if (index < 0)
return index;
- pdata->cpu_map[index] = topology_core_id(cpu);
- attr_no = index + BASE_SYSFS_ATTR_NO;
- }
- if (attr_no > MAX_CORE_DATA - 1) {
- err = -ERANGE;
- goto ida_free;
+ pdata->cpu_map[index] = topology_core_id(cpu);
+ index += BASE_SYSFS_ATTR_NO;
}
tdata = init_temp_data(cpu, pkg_flag);
@@ -544,20 +546,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
if (get_ttarget(tdata, &pdev->dev) >= 0)
tdata->attr_size++;
- pdata->core_data[attr_no] = tdata;
+ pdata->core_data[index] = tdata;
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
+ err = create_core_attrs(tdata, pdata->hwmon_dev, index);
if (err)
goto exit_free;
return 0;
exit_free:
- pdata->core_data[attr_no] = NULL;
+ pdata->core_data[index] = NULL;
kfree(tdata);
ida_free:
if (!pkg_flag)
- ida_free(&pdata->ida, index);
+ ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
return err;
}
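The patch above decouples the sysfs attribute number (always core id + 2,
now derived inside create_core_attrs()) from the core_data[] slot, which
comes from a bounded ida_alloc_max() so large core ids can no longer run
past the array. A toy sketch of the two values, with illustrative numbers:

#include <stdio.h>

#define BASE_SYSFS_ATTR_NO 2	/* slots 0 and 1 are reserved, as above */

int main(void)
{
	int core_id = 37;	/* may exceed the array bound on big parts */
	int ida_slot = 3;	/* bounded allocation: always < NUM_REAL_CORES */

	int attr_no = core_id + 2;			/* names temp39_* in sysfs */
	int index = ida_slot + BASE_SYSFS_ATTR_NO;	/* stored in core_data[5] */

	printf("sysfs temp%d_*, stored at core_data[%d]\n", attr_no, index);
	return 0;
}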
diff --git a/drivers/hwmon/gigabyte_waterforce.c b/drivers/hwmon/gigabyte_waterforce.c
index 85e523775714..8129d7b3ceaf 100644
--- a/drivers/hwmon/gigabyte_waterforce.c
+++ b/drivers/hwmon/gigabyte_waterforce.c
@@ -146,7 +146,7 @@ static int waterforce_get_status(struct waterforce_data *priv)
/* Send command for getting status */
ret = waterforce_write_expanded(priv, get_status_cmd, GET_STATUS_CMD_LENGTH);
if (ret < 0)
- return ret;
+ goto unlock_and_return;
ret = wait_for_completion_interruptible_timeout(&priv->status_report_received,
msecs_to_jiffies(STATUS_VALIDITY));
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 8d2ef3145bca..9fbab8f02334 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
+ int num_reg_temp_config;
struct device *hwmon_dev;
struct sensor_template_group tsi_temp_tg;
@@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6775_REG_TEMP_CRIT;
@@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6776_REG_TEMP_CRIT;
@@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6798_REG_TEMP_OVER;
reg_temp_hyst = NCT6798_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6798_REG_TEMP_CRIT;
@@ -4204,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
= reg_temp_crit[src - 1];
if (reg_temp_crit_l && reg_temp_crit_l[i])
data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
- data->reg_temp_config[src - 1] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[src - 1] = reg_temp_config[i];
data->temp_src[src - 1] = src;
continue;
}
@@ -4217,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
data->reg_temp[0][s] = reg_temp[i];
data->reg_temp[1][s] = reg_temp_over[i];
data->reg_temp[2][s] = reg_temp_hyst[i];
- data->reg_temp_config[s] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[s] = reg_temp_config[i];
if (reg_temp_crit_h && reg_temp_crit_h[i])
data->reg_temp[3][s] = reg_temp_crit_h[i];
else if (reg_temp_crit[src - 1])
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index b9bb469e2d8f..e5fa10b3b8bc 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -126,6 +126,21 @@ static const struct regulator_desc __maybe_unused mp2975_reg_desc[] = {
#define to_mp2975_data(x) container_of(x, struct mp2975_data, info)
+static int mp2975_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * Report direct format as configured by MFR_DC_LOOP_CTRL.
+ * Unlike on MP2971/MP2973 the reported VOUT_MODE isn't automatically
+ * internally updated, but always reads as PB_VOUT_MODE_VID.
+ */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
static int
mp2975_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg,
u16 mask)
@@ -869,6 +884,7 @@ static struct pmbus_driver_info mp2975_info = {
PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT | PMBUS_PHASE_VIRTUAL,
+ .read_byte_data = mp2975_read_byte_data,
.read_word_data = mp2975_read_word_data,
#if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR)
.num_regulators = 1,
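The new read_byte_data hook follows the usual pmbus override pattern:
intercept the one register whose hardware value is misleading and return
-ENODATA for everything else so the generic core takes over. A standalone
sketch of that dispatch shape, with illustrative register values:

#include <errno.h>
#include <stdio.h>

#define REG_VOUT_MODE	0x20	/* illustrative stand-in for PMBUS_VOUT_MODE */
#define VOUT_MODE_DIRECT 0x40	/* illustrative stand-in for PB_VOUT_MODE_DIRECT */

static int read_byte(int reg)
{
	switch (reg) {
	case REG_VOUT_MODE:
		return VOUT_MODE_DIRECT;	/* chip always reports VID; override it */
	default:
		return -ENODATA;		/* fall back to generic handling */
	}
}

int main(void)
{
	printf("%d %d\n", read_byte(REG_VOUT_MODE), read_byte(0x8b));
	return 0;
}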
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3757b9391e60..aa0ee8ecd6f2 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -90,10 +90,8 @@ obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
obj-$(CONFIG_I2C_OWL) += i2c-owl.o
-i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o
-obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
-i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o
-obj-$(CONFIG_I2C_APPLE) += i2c-apple.o
+obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi-core.o i2c-pasemi-pci.o
+obj-$(CONFIG_I2C_APPLE) += i2c-pasemi-core.o i2c-pasemi-platform.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3932e8d96a17..2c36b36d7d51 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -498,11 +498,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
/* Set block buffer mode */
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
-
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
outb_p(len, SMBHSTDAT0(priv));
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
outb_p(data->block[i+1], SMBBLKDAT(priv));
}
@@ -520,6 +519,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
}
data->block[0] = len;
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
data->block[i + 1] = inb_p(SMBBLKDAT(priv));
}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 88a053987403..60e813137f84 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+ /* flag the last byte as processed */
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_PROCESSED, &value);
+
i2c_imx_slave_finish_op(i2c_imx);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index 7d54a9f34c74..bd8becbdeeb2 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -369,6 +369,7 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
return 0;
}
+EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe);
irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
{
@@ -378,3 +379,8 @@ irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
complete(&smbus->irq_completion);
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(pasemi_irq_handler);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver");
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 0d2e7171e3a6..da94df466e83 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -613,20 +613,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
peripheral.addr = msgs[i].addr;
+ ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+ &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
+ if (ret)
+ goto err;
+
if (msgs[i].flags & I2C_M_RD) {
ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
&rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
if (ret)
goto err;
- }
-
- ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
- &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
- if (ret)
- goto err;
- if (msgs[i].flags & I2C_M_RD)
dma_async_issue_pending(gi2c->rx_c);
+ }
+
dma_async_issue_pending(gi2c->tx_c);
timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 91adcac875a4..c9d7afe489e8 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -219,10 +219,12 @@ config BMA400
config BMA400_I2C
tristate
+ select REGMAP_I2C
depends on BMA400
config BMA400_SPI
tristate
+ select REGMAP_SPI
depends on BMA400
config BMC150_ACCEL
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index feb86fe6c422..62490424b6ae 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -1821,7 +1821,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
{
struct device *dev = &st->spi->dev;
struct device_node *of_node = dev_of_node(dev);
- struct clk_init_data init;
+ struct clk_init_data init = {};
const char *clk_name;
int ret;
@@ -1891,10 +1891,14 @@ static int ad4130_setup(struct iio_dev *indio_dev)
return ret;
/*
- * Configure all GPIOs for output. If configured, the interrupt function
- * of P2 takes priority over the GPIO out function.
+ * Configure unused GPIOs for output. If configured, the interrupt
+ * function of P2 takes priority over the GPIO out function.
*/
- val = AD4130_IO_CONTROL_GPIO_CTRL_MASK;
+ val = 0;
+ for (i = 0; i < AD4130_MAX_GPIOS; i++)
+ if (st->pins_fn[i + AD4130_AIN2_P1] == AD4130_PIN_FN_NONE)
+ val |= FIELD_PREP(AD4130_IO_CONTROL_GPIO_CTRL_MASK, BIT(i));
+
val |= FIELD_PREP(AD4130_IO_CONTROL_INT_PIN_SEL_MASK, st->int_pin_sel);
ret = regmap_write(st->regmap, AD4130_IO_CONTROL_REG, val);
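Instead of forcing every pin to GPIO output, the fix sets the control bit
only for pins whose function is unassigned. A standalone sketch of building
that bitmask (macro names and values illustrative):

#include <stdio.h>

#define MAX_GPIOS	4
#define PIN_FN_NONE	0

int main(void)
{
	int pins_fn[MAX_GPIOS] = { PIN_FN_NONE, 1 /* in use */, PIN_FN_NONE, 1 };
	unsigned int val = 0;
	int i;

	for (i = 0; i < MAX_GPIOS; i++)
		if (pins_fn[i] == PIN_FN_NONE)
			val |= 1u << i;	/* analogue of FIELD_PREP(..., BIT(i)) */

	printf("gpio ctrl bits: 0x%x\n", val);	/* prints 0x5 */
	return 0;
}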
diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c
index 57700f124803..700564305057 100644
--- a/drivers/iio/adc/ad7091r8.c
+++ b/drivers/iio/adc/ad7091r8.c
@@ -195,7 +195,7 @@ static int ad7091r8_gpio_setup(struct ad7091r_state *st)
st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(st->reset_gpio))
- return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio),
+ return dev_err_probe(st->dev, PTR_ERR(st->reset_gpio),
"Error on requesting reset GPIO\n");
if (st->reset_gpio) {
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 2de5494e7c22..b15b7a3b66d5 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -48,6 +48,18 @@ config HDC2010
To compile this driver as a module, choose M here: the module
will be called hdc2010.
+config HDC3020
+ tristate "TI HDC3020 relative humidity and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ Say yes here to build support for the Texas Instruments
+ HDC3020, HDC3021 and HDC3022 relative humidity and temperature
+ sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hdc3020.
+
config HID_SENSOR_HUMIDITY
tristate "HID Environmental humidity sensor"
depends on HID_SENSOR_HUB
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index f19ff3de97c5..5fbeef299f61 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HDC2010) += hdc2010.o
+obj-$(CONFIG_HDC3020) += hdc3020.o
obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o
hts221-y := hts221_core.o \
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index 4e3311170725..ed70415512f6 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -322,7 +322,7 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev,
if (chan->type != IIO_TEMP)
return -EINVAL;
- *val = 16852;
+ *val = -16852;
return IIO_VAL_INT;
default:
diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
index 83e53acfbe88..c7f5866a177d 100644
--- a/drivers/iio/imu/bno055/Kconfig
+++ b/drivers/iio/imu/bno055/Kconfig
@@ -8,6 +8,7 @@ config BOSCH_BNO055
config BOSCH_BNO055_SERIAL
tristate "Bosch BNO055 attached via UART"
depends on SERIAL_DEV_BUS
+ select REGMAP
select BOSCH_BNO055
help
Enable this to support Bosch BNO055 IMUs attached via UART.
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 9a85752124dd..173dc00762a1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1584,10 +1584,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
ret = iio_device_register_sysfs_group(indio_dev,
&iio_dev_opaque->chan_attr_group);
if (ret)
- goto error_clear_attrs;
+ goto error_free_chan_attrs;
return 0;
+error_free_chan_attrs:
+ kfree(iio_dev_opaque->chan_attr_group.attrs);
+ iio_dev_opaque->chan_attr_group.attrs = NULL;
error_clear_attrs:
iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 5cd27f04b45e..b6c4bef2a7bb 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -226,6 +226,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 69938204456f..42b70cd42b39 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
struct rm3100_data *data;
unsigned int tmp;
int ret;
+ int samp_rate_index;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
@@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp);
if (ret < 0)
return ret;
+
+ samp_rate_index = tmp - RM3100_TMRC_OFFSET;
+ if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) {
+ dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n");
+ return -EINVAL;
+ }
/* Initializing max wait time, which is double conversion time. */
- data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2]
- * 2;
+ data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2;
/* Cycle count values may not be what we want. */
if ((tmp - RM3100_TMRC_OFFSET) == 0)
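The fix validates the index derived from the TMRC register before it is
used to subscript rm3100_samp_rates[], since a misbehaving part or bus can
return an out-of-range value. A standalone sketch of the same guard, with
illustrative table contents:

#include <stdio.h>

#define TMRC_OFFSET	0x95
#define SAMP_NUM	13

static const int conversion_time_ms[SAMP_NUM] = {
	2, 3, 5, 8, 17, 34, 67, 134, 267, 534, 1067, 2134, 4268	/* illustrative */
};

static int max_wait_ms(int reg_val)
{
	int idx = reg_val - TMRC_OFFSET;

	if (idx < 0 || idx >= SAMP_NUM)
		return -1;	/* reject bogus hardware readback */
	return conversion_time_ms[idx] * 2;	/* double the conversion time */
}

int main(void)
{
	printf("%d %d\n", max_wait_ms(0x96), max_wait_ms(0xff));	/* 6 -1 */
	return 0;
}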
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 433d6fac83c4..e8a5fed07e88 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -87,6 +87,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
static const struct spi_device_id bmp280_spi_id[] = {
+ { "bmp085", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp180", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp181", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp280", (kernel_ulong_t)&bmp280_chip_info },
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 824349659d69..ce9c5bae83bf 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -401,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -455,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
@@ -486,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct ib_mw *mw;
int rc;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
@@ -1817,7 +1827,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
/* SRQ resize is not supported */
- break;
+ return -EINVAL;
case IB_SRQ_LIMIT:
/* Change the SRQ threshold */
if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@@ -1832,13 +1842,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
- break;
+ return 0;
default:
ibdev_err(&rdev->ibdev,
"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
- return 0;
}
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
@@ -2556,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
- /* Need unconditional fence for local invalidate
- * opcode to work as expected.
- */
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2583,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- /* Need unconditional fence for reg_mr
- * opcode to function as expected.
- */
-
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
@@ -2719,6 +2717,18 @@ bad:
return rc;
}
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+	/* Need an unconditional fence for non-wire memory opcodes
+	 * to work as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
@@ -2798,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = -EINVAL;
goto bad;
}
- if (!rc)
+ if (!rc) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
bad:
if (rc) {
ibdev_err(&qp->rdev->ibdev,
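Rather than fencing local-invalidate and reg-MR WQEs unconditionally, the
driver now applies the fence in one helper, only on pre-P5 silicon and only
to non-wire memory opcodes. A standalone sketch of that type-based flagging
(enum and flag values illustrative):

#include <stdio.h>

enum wqe_type { WQE_SEND, WQE_LOCAL_INV, WQE_REG_MR, WQE_BIND_MW };
#define FLAG_UC_FENCE 0x1

static unsigned int maybe_fence(enum wqe_type type, unsigned int flags, int legacy_chip)
{
	if (legacy_chip &&
	    (type == WQE_LOCAL_INV || type == WQE_REG_MR || type == WQE_BIND_MW))
		flags |= FLAG_UC_FENCE;	/* memory op on old silicon: fence it */
	return flags;
}

int main(void)
{
	printf("%x %x %x\n",
	       maybe_fence(WQE_REG_MR, 0, 1),	/* fenced on legacy */
	       maybe_fence(WQE_REG_MR, 0, 0),	/* untouched on P5/P7 */
	       maybe_fence(WQE_SEND, 0, 1));	/* wire opcode: untouched */
	return 0;
}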
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f022c922fae5..54b4d2f3a5d8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -280,9 +280,6 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
-
- if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
- return;
rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index c98e04fe2ddd..439d0c7c5d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -744,7 +744,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
- srq->threshold = le16_to_cpu(sb->srq_limit);
+ if (!rc)
+ srq->threshold = le16_to_cpu(sb->srq_limit);
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 68c621ff59d0..5a91cbda4aee 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
- goto done;
+ goto free_cr_base;
}
}
set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
ret = 0;
done:
return ret;
+
+free_cr_base:
+ free_credit_return(dd);
+ goto done;
}
void free_credit_return(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6e5ac2023328..b67d23b1f286 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
- if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index 8fb752f2eda2..2cb4b96db721 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index bd4b2b896444..ad50b77282f8 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
@@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
+ if (rf == dev_id) {
+ tasklet_kill(&rf->dpc_tasklet);
+ } else {
+ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+ tasklet_kill(&iwceq->dpc_tasklet);
+ }
}
/**
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index b5eb8d421988..0b046c061742 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
- init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
@@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
info.cq_base_pa = iwcq->kmem.pa;
}
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
- (u32)IRDMA_MAX_CQ_READ_THRESH);
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index f87531318feb..a78a067e3ce7 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
+ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
+ if (!MLX5_CAP_GEN(mdev, roce) ||
+ !MLX5_CAP_ROCE(mdev, roce_cc_general))
+ continue;
+
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
dbg_cc_params->params[i].port_num = port_num;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 869369cb5b5f..253fea374a72 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
UVERBS_IDR_ANY_OBJECT,
- UVERBS_ACCESS_WRITE,
+ UVERBS_ACCESS_READ,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index df1d1b0a3ef7..9947feb7fb8a 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
*/
copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
+ memcpy(eseg->inline_hdr.data, pdata, copysz);
stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
sizeof(eseg->inline_hdr.start) + copysz, 16);
*size += stride / 16;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7887a6786ed4..f118ce0a9a61 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, true, 0, alloc_and_init);
- if (rc)
+ if (rc) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info,
+ qp->usq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ }
return rc;
+ }
}
memset(&in_params, 0, sizeof(in_params));
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 58f70cfec45a..040234c01be4 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");
+static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
+{
+ return kstrtou64(buffer, 16, (u64 *)kp->arg);
+}
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
- 0444);
+module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
+ &srpt_service_guid, 0444);
MODULE_PARM_DESC(srpt_service_guid,
"Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
/**
* srpt_qp_event - QP event callback function
* @event: Description of the event that occurred.
- * @ch: SRPT RDMA channel.
+ * @ptr: SRPT RDMA channel.
*/
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+static void srpt_qp_event(struct ib_event *event, void *ptr)
{
+ struct srpt_rdma_ch *ch = ptr;
+
pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
event->event, ch, ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
@@ -1807,8 +1813,7 @@ retry:
ch->cq_size = ch->rq_size + sq_size;
qp_init->qp_context = (void *)ch;
- qp_init->event_handler
- = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->event_handler = srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
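The module parameter gains a setter so srpt_service_guid can be written as
well as read; kstrtou64(..., 16, ...) parses the value as hex, matching the
0x%016llx format the getter prints. A userspace sketch of the same
parse/print pair (strtoull standing in for kstrtou64):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

static int set_u64_x(const char *buf, uint64_t *val)
{
	char *end;

	*val = strtoull(buf, &end, 16);		/* base 16, like kstrtou64 */
	return end == buf ? -1 : 0;
}

int main(void)
{
	uint64_t guid;

	if (!set_u64_x("0002c90300a0b0c0", &guid))
		printf("0x%016" PRIx64 "\n", guid);
	return 0;
}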
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index b1244d7df6cc..7c4b2a5cc1b5 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -294,6 +294,7 @@ static const struct xpad_device {
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -491,6 +492,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x15e4), /* Numark Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 13ef6284223d..7f67f9f2946b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -811,7 +811,6 @@ static int atkbd_probe(struct atkbd *atkbd)
{
struct ps2dev *ps2dev = &atkbd->ps2dev;
unsigned char param[2];
- bool skip_getid;
/*
* Some systems, where the bit-twiddling when testing the io-lines of the
@@ -825,6 +824,11 @@ static int atkbd_probe(struct atkbd *atkbd)
"keyboard reset failed on %s\n",
ps2dev->serio->phys);
+ if (atkbd_skip_getid(atkbd)) {
+ atkbd->id = 0xab83;
+ goto deactivate_kbd;
+ }
+
/*
* Then we check the keyboard ID. We should get 0xab83 under normal conditions.
* Some keyboards report different values, but the first byte is always 0xab or
@@ -833,18 +837,17 @@ static int atkbd_probe(struct atkbd *atkbd)
*/
param[0] = param[1] = 0xa5; /* initialize with invalid values */
- skip_getid = atkbd_skip_getid(atkbd);
- if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
+ if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
/*
- * If the get ID command was skipped or failed, we check if we can at least set
+ * If the get ID command failed, we check if we can at least set
* the LEDs on the keyboard. This should work on every keyboard out there.
* It also turns the LEDs off, which we want anyway.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
return -1;
- atkbd->id = skip_getid ? 0xab83 : 0xabba;
+ atkbd->id = 0xabba;
return 0;
}
@@ -860,6 +863,7 @@ static int atkbd_probe(struct atkbd *atkbd)
return -1;
}
+deactivate_kbd:
/*
* Make sure nothing is coming from the keyboard and disturbs our
* internal state.
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ca150618d32f..953992b458e9 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -19,6 +19,7 @@
* Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
*/
+#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -193,6 +194,8 @@ enum tp_type {
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
+/* maximum number of supported endpoints (currently trackpad and button) */
+#define MAX_ENDPOINTS 2
/* trackpad finger data block size */
#define FSIZE_TYPE1 (14 * sizeof(__le16))
@@ -891,6 +894,18 @@ static int bcm5974_resume(struct usb_interface *iface)
return error;
}
+static bool bcm5974_check_endpoints(struct usb_interface *iface,
+ const struct bcm5974_config *cfg)
+{
+ u8 ep_addr[MAX_ENDPOINTS + 1] = {0};
+
+ ep_addr[0] = cfg->tp_ep;
+ if (cfg->tp_type == TYPE1)
+ ep_addr[1] = cfg->bt_ep;
+
+ return usb_check_int_endpoints(iface, ep_addr);
+}
+
static int bcm5974_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
@@ -903,6 +918,11 @@ static int bcm5974_probe(struct usb_interface *iface,
/* find the product index */
cfg = bcm5974_get_config(udev);
+ if (!bcm5974_check_endpoints(iface, cfg)) {
+ dev_err(&iface->dev, "Unexpected non-int endpoint\n");
+ return -ENODEV;
+ }
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index b585b1dab870..dfc6c581873b 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -635,6 +635,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOAUX)
},
{
+ /* Fujitsu Lifebook U728 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
+ {
/* Gigabyte M912 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
@@ -1210,6 +1218,12 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NS5x_7xPU"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index af32fbe57b63..b068ff8afbc9 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -884,7 +884,8 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
}
}
- if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
+ /* Some devices with gpio_int_idx 0 list a third unused GPIO */
+ if ((ts->gpio_count == 2 || ts->gpio_count == 3) && ts->gpio_int_idx == 0) {
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_first_gpios;
} else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index 20331e119beb..03d626776ba1 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -1372,6 +1372,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
+ .keepalive = true,
.num_nodes = 1,
.nodes = { &slv_qns_cdsp_mem_noc }
};
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index 629faa4c9aae..fc22cecf650f 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -2223,6 +2223,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8550",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c
index b83de54577b6..b962e6c233ef 100644
--- a/drivers/interconnect/qcom/sm8650.c
+++ b/drivers/interconnect/qcom/sm8650.c
@@ -1160,7 +1160,7 @@ static struct qcom_icc_node qns_gemnoc_sf = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
- .enable_mask = BIT(3),
+ .enable_mask = BIT(0),
.num_nodes = 1,
.nodes = { &ebi },
};
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index d19501d913b3..cbaf4f9c41be 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -1586,6 +1586,7 @@ static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.num_nodes = 1,
.nodes = { &ebi },
};
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 05722121f00e..4a27fbdb2d84 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
- unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
- struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
- cd);
- if (ret) {
- list_for_each_entry_from_reverse(
- master, &smmu_domain->devices, domain_head)
- arm_smmu_write_ctx_desc(
- master, mm_get_enqcmd_pasid(mm), NULL);
- break;
- }
- }
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (ret)
- goto err_put_notifier;
-
list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
return smmu_mn;
-err_put_notifier:
- /* Frees smmu_mn */
- mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
arm_smmu_free_shared_cd(cd);
return ERR_PTR(ret);
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
list_del(&smmu_mn->list);
- arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
- NULL);
-
/*
* If we went through clear(), we've already invalidated, and no
* new TLB entry can have been formed.
@@ -381,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+ struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
@@ -404,9 +381,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
goto err_free_bond;
}
+ ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+ if (ret)
+ goto err_put_notifier;
+
list_add(&bond->list, &master->bonds);
return 0;
+err_put_notifier:
+ arm_smmu_mmu_notifier_put(bond->smmu_mn);
err_free_bond:
kfree(bond);
return ret;
@@ -568,6 +551,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
mutex_lock(&sva_lock);
+
+ arm_smmu_write_ctx_desc(master, id, NULL);
+
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
bond = t;
@@ -590,7 +576,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
- ret = __arm_smmu_sva_bind(dev, mm);
+ ret = __arm_smmu_sva_bind(dev, id, mm);
mutex_unlock(&sva_lock);
return ret;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 68b6bc5e7c71..6317aaf7b3ab 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED) {
+ if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
+ return NULL;
+ }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
- if (dev) {
- struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
-
- if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
- kfree(smmu_domain);
- return NULL;
- }
- }
-
return &smmu_domain->domain;
}
@@ -1600,7 +1595,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc_paging = arm_smmu_domain_alloc_paging,
+ .domain_alloc = arm_smmu_domain_alloc,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6fb5f6fceea1..11652e0bcab3 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -396,8 +396,6 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
-static void domain_update_iotlb(struct dmar_domain *domain);
-
/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{
@@ -1218,7 +1216,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
return NULL;
}
-static void domain_update_iotlb(struct dmar_domain *domain)
+void domain_update_iotlb(struct dmar_domain *domain)
{
struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
@@ -1368,6 +1366,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_unlock_irqrestore(&domain->lock, flags);
}
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ unsigned long pfn, unsigned int pages,
+ int ih)
+{
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned long bitmask = aligned_pages - 1;
+ unsigned int mask = ilog2(aligned_pages);
+ u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
+ /*
+ * Fallback to domain selective flush if no PSI support or
+ * the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+}
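/*
 * A standalone worked example of the shared_bits/mask computation above,
 * under illustrative inputs; __builtin_ctzl (a GCC/Clang builtin) stands
 * in for the kernel's __ffs().
 */
#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x1005, pages = 8;
	unsigned long bitmask = 8 - 1;			/* aligned_pages = 8 */
	unsigned long end_pfn = pfn + pages - 1;	/* 0x100c */
	unsigned long shared = ~(pfn ^ end_pfn) & ~bitmask;
	unsigned int mask = shared ? (unsigned int)__builtin_ctzl(shared) : 64;

	/* A flush of 2^mask pages covering pfn also covers end_pfn. */
	printf("mask = %u -> flush %lu pages\n", mask, 1UL << mask);	/* 4 -> 16 */
	return 0;
}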
+
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1384,42 +1422,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
- if (domain->use_first_level) {
+ if (domain->use_first_level)
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
- } else {
- unsigned long bitmask = aligned_pages - 1;
-
- /*
- * PSI masks the low order bits of the base address. If the
- * address isn't aligned to the mask, then compute a mask value
- * needed to ensure the target range is flushed.
- */
- if (unlikely(bitmask & pfn)) {
- unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
- /*
- * Since end_pfn <= pfn + bitmask, the only way bits
- * higher than bitmask can differ in pfn and end_pfn is
- * by carrying. This means after masking out bitmask,
- * high bits starting with the first set bit in
- * shared_bits are all equal in both pfn and end_pfn.
- */
- shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
- mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
- }
-
- /*
- * Fallback to domain selective flush if no PSI support or
- * the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ else
+ __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
/*
* In caching mode, changes of pages from non-present to present require
@@ -1443,6 +1449,46 @@ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *
iommu_flush_write_buffer(iommu);
}
+/*
+ * Flush the relevant caches in nested translation if the domain
+ * also serves as a parent
+ */
+static void parent_domain_flush(struct dmar_domain *domain,
+ unsigned long pfn,
+ unsigned long pages, int ih)
+{
+ struct dmar_domain *s1_domain;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ struct device_domain_info *device_info;
+ struct iommu_domain_info *info;
+ unsigned long flags;
+ unsigned long i;
+
+ xa_for_each(&s1_domain->iommu_array, i, info)
+ __iommu_flush_iotlb_psi(info->iommu, info->did,
+ pfn, pages, ih);
+
+ if (!s1_domain->has_iotlb_device)
+ continue;
+
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ list_for_each_entry(device_info, &s1_domain->devices, link)
+ /*
+			 * The address translation cache on the device side
+			 * caches the result of nested translation. There is
+			 * no easy way to identify the exact set of nested
+			 * translations affected by a change in S2, so just
+			 * flush the entire device cache.
+ */
+ __iommu_flush_dev_iotlb(device_info, 0,
+ MAX_AGAW_PFN_WIDTH);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+}
+
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -1462,6 +1508,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
+
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, 0, -1, 0);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1985,6 +2034,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
+ if (domain->nested_parent)
+ parent_domain_flush(domain, start_pfn,
+ lvl_pages, 0);
}
pte++;
@@ -3883,6 +3935,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
/* Must be NESTING domain */
@@ -3908,11 +3961,16 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
if (!domain)
return ERR_PTR(-ENOMEM);
- if (nested_parent)
- to_dmar_domain(domain)->nested_parent = true;
+ dmar_domain = to_dmar_domain(domain);
+
+ if (nested_parent) {
+ dmar_domain->nested_parent = true;
+ INIT_LIST_HEAD(&dmar_domain->s1_domains);
+ spin_lock_init(&dmar_domain->s1_lock);
+ }
if (dirty_tracking) {
- if (to_dmar_domain(domain)->use_first_level) {
+ if (dmar_domain->use_first_level) {
iommu_domain_free(domain);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -3924,8 +3982,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ WARN_ON(dmar_domain->nested_parent &&
+ !list_empty(&dmar_domain->s1_domains));
if (domain != &si_domain->domain)
- domain_exit(to_dmar_domain(domain));
+ domain_exit(dmar_domain);
}
int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -4107,6 +4169,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, start_pfn, nrpages,
+ list_empty(&gather->freelist));
put_pages_list(&gather->freelist);
}
@@ -4664,21 +4729,70 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
return vtd;
}
+/*
+ * Set dirty tracking for the device list of a domain. The caller must
+ * hold the domain->lock when calling it.
+ */
+static int device_set_dirty_tracking(struct list_head *devices, bool enable)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, devices, link) {
+ ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
+ IOMMU_NO_PASID, enable);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
+ bool enable)
+{
+ struct dmar_domain *s1_domain;
+ unsigned long flags;
+ int ret;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ ret = device_set_dirty_tracking(&s1_domain->devices, enable);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ if (ret)
+ goto err_unwind;
+ }
+ spin_unlock(&domain->s1_lock);
+ return 0;
+
+err_unwind:
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ device_set_dirty_tracking(&s1_domain->devices,
+ domain->dirty_tracking);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+ return ret;
+}
+
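parent_domain_set_dirty_tracking() above is an instance of a common apply-or-unwind shape: walk every child, apply the new setting, and on the first failure re-apply the last committed value to the whole list; re-applying it to children that were never touched is harmless, so no cursor has to be remembered. A minimal sketch with hypothetical stand-in types, locking omitted:

    /* Hypothetical stand-in for the per-s1-domain device list. */
    struct child {
        struct child *next;
        int setting;
    };

    static int child_apply(struct child *c, int enable)
    {
        c->setting = enable;    /* a real setter can fail */
        return 0;
    }

    static int set_all_or_unwind(struct child *head, int committed, int enable)
    {
        struct child *c;
        int ret;

        for (c = head; c; c = c->next) {
            ret = child_apply(c, enable);
            if (ret)
                goto unwind;
        }
        return 0;

    unwind:
        /*
         * Roll the whole list back to the last committed value; the
         * children that were never touched just have it re-applied.
         */
        for (c = head; c; c = c->next)
            child_apply(c, committed);
        return ret;
    }

In the kernel version the committed value is domain->dirty_tracking, which is only updated once every path has succeeded.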
static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct device_domain_info *info;
int ret;
spin_lock(&dmar_domain->lock);
if (dmar_domain->dirty_tracking == enable)
goto out_unlock;
- list_for_each_entry(info, &dmar_domain->devices, link) {
- ret = intel_pasid_setup_dirty_tracking(info->iommu,
- info->domain, info->dev,
- IOMMU_NO_PASID, enable);
+ ret = device_set_dirty_tracking(&dmar_domain->devices, enable);
+ if (ret)
+ goto err_unwind;
+
+ if (dmar_domain->nested_parent) {
+ ret = parent_domain_set_dirty_tracking(dmar_domain, enable);
if (ret)
goto err_unwind;
}
@@ -4690,10 +4804,8 @@ out_unlock:
return 0;
err_unwind:
- list_for_each_entry(info, &dmar_domain->devices, link)
- intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain,
- info->dev, IOMMU_NO_PASID,
- dmar_domain->dirty_tracking);
+ device_set_dirty_tracking(&dmar_domain->devices,
+ dmar_domain->dirty_tracking);
spin_unlock(&dmar_domain->lock);
return ret;
}
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d02f916d8e59..4145c04cb1c6 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -627,6 +627,10 @@ struct dmar_domain {
int agaw;
/* maximum mapped address */
u64 max_addr;
+ /* Protect the s1_domains list */
+ spinlock_t s1_lock;
+ /* Track s1_domains nested on this domain */
+ struct list_head s1_domains;
};
/* Nested user domain */
@@ -637,6 +641,8 @@ struct dmar_domain {
unsigned long s1_pgtbl;
/* page table attributes */
struct iommu_hwpt_vtd_s1 s1_cfg;
+ /* link to parent domain siblings */
+ struct list_head s2_link;
};
};
@@ -1060,6 +1066,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
+void domain_update_iotlb(struct dmar_domain *domain);
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index f26c7f1c46cc..a7d68f3d518a 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -65,12 +65,20 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ domain_update_iotlb(dmar_domain);
+
return 0;
}
static void intel_nested_domain_free(struct iommu_domain *domain)
{
- kfree(to_dmar_domain(domain));
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct dmar_domain *s2_domain = dmar_domain->s2_domain;
+
+ spin_lock(&s2_domain->s1_lock);
+ list_del(&dmar_domain->s2_link);
+ spin_unlock(&s2_domain->s1_lock);
+ kfree(dmar_domain);
}
static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
@@ -95,7 +103,7 @@ static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
}
static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
- unsigned long npages, bool ih)
+ u64 npages, bool ih)
{
struct iommu_domain_info *info;
unsigned int mask;
@@ -201,5 +209,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
spin_lock_init(&domain->lock);
xa_init(&domain->iommu_array);
+ spin_lock(&s2_domain->s1_lock);
+ list_add(&domain->s2_link, &s2_domain->s1_domains);
+ spin_unlock(&s2_domain->s1_lock);
+
return &domain->domain;
}
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 3239cefa4c33..108158e2b907 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -428,7 +428,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Set up dirty tracking on a second only or nested translation type.
*/
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled)
{
@@ -445,7 +444,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
return -ENODEV;
}
- did = domain_id_iommu(domain, iommu);
+ did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
pgtt != PASID_ENTRY_PGTT_NESTED) {
@@ -658,6 +657,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, s2_domain->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (s2_domain->dirty_tracking)
+ pasid_set_ssade(pte);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 8d40d4c66e31..487ede039bdd 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -307,7 +307,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index c3fc9201d0be..65814cbc8402 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
}
iommu_mm->pasid = pasid;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
+ INIT_LIST_HEAD(&iommu_mm->sva_handles);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
* initialization to iommu_mm fields. If it does, readers may see a
@@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
goto out_unlock;
}
+ list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
+ if (handle->dev == dev) {
+ refcount_inc(&handle->users);
+ mutex_unlock(&iommu_sva_lock);
+ return handle;
+ }
+ }
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
ret = -ENOMEM;
@@ -111,6 +120,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
list_add(&domain->next, &mm->iommu_mm->sva_domains);
out:
+ refcount_set(&handle->users, 1);
+ list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
mutex_unlock(&iommu_sva_lock);
handle->dev = dev;
handle->domain = domain;
@@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
+ if (!refcount_dec_and_test(&handle->users)) {
+ mutex_unlock(&iommu_sva_lock);
+ return;
+ }
+ list_del(&handle->handle_item);
+
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
if (--domain->users == 0) {
list_del(&domain->next);
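The iommu-sva.c hunks above turn repeated binds of the same (device, mm) pair into shared, refcounted handles: bind first searches sva_handles and bumps the count on a hit, and unbind only tears the binding down on the final put. A condensed single-threaded sketch with simplified stand-in types (the kernel uses refcount_t under iommu_sva_lock):

    /* Simplified stand-in for struct iommu_sva; not the kernel type. */
    struct handle {
        struct handle *next;
        void *dev;
        unsigned int users;           /* refcount_t in the kernel */
    };

    /* Return the shared handle for dev, creating it on first bind. */
    static struct handle *sva_bind(struct handle **list, void *dev,
                                   struct handle *(*alloc_bind)(void *))
    {
        struct handle *h;

        for (h = *list; h; h = h->next)
            if (h->dev == dev) {
                h->users++;           /* refcount_inc() */
                return h;
            }

        h = alloc_bind(dev);          /* first bind: attach the domain */
        if (h) {
            h->users = 1;             /* refcount_set(..., 1) */
            h->next = *list;
            *list = h;
        }
        return h;
    }

    /* Return nonzero when the caller should actually detach the device. */
    static int sva_unbind(struct handle *h)
    {
        if (--h->users)               /* refcount_dec_and_test() */
            return 0;
        /* unlink h from the list here, then detach the PASID */
        return 1;
    }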
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 68e648b55767..d14413916f93 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1799,7 +1799,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
* domain. Do not use in new drivers.
*/
if (ops->default_domain) {
- if (req_type)
+ if (req_type != ops->default_domain->type)
return ERR_PTR(-EINVAL);
return ops->default_domain;
}
@@ -1871,10 +1871,18 @@ static int iommu_get_def_domain_type(struct iommu_group *group,
const struct iommu_ops *ops = dev_iommu_ops(dev);
int type;
- if (!ops->def_domain_type)
- return cur_type;
-
- type = ops->def_domain_type(dev);
+ if (ops->default_domain) {
+ /*
+ * Drivers that declare a global static default_domain will
+ * always choose that.
+ */
+ type = ops->default_domain->type;
+ } else {
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+ else
+ return cur_type;
+ }
if (!type || cur_type == type)
return cur_type;
if (!cur_type)
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 3f3f1fa1a0a9..33d142f8057d 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -263,7 +263,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
if (cmd->__reserved)
return -EOPNOTSUPP;
- if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
+ if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
+ (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
return -EINVAL;
idev = iommufd_get_device(ucmd, cmd->dev_id);
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 504ac1b01b2d..05fd9d3abf1b 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -1330,20 +1330,23 @@ out_unlock:
int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
{
+ u32 new_id;
int rc;
down_write(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
- rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
- xa_limit_16b, GFP_KERNEL_ACCOUNT);
+ rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
+ GFP_KERNEL_ACCOUNT);
+
if (rc)
goto out_unlock;
rc = iopt_calculate_iova_alignment(iopt);
if (rc) {
- xa_erase(&iopt->access_list, access->iopt_access_list_id);
+ xa_erase(&iopt->access_list, new_id);
goto out_unlock;
}
+ access->iopt_access_list_id = new_id;
out_unlock:
up_write(&iopt->iova_rwsem);
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index 482d4059f5db..e854d3f67205 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -45,6 +45,7 @@ enum {
enum {
MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
+ MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
};
enum {
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 0a92c9eeaf7f..db8c46bee155 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -100,7 +100,7 @@ struct iova_bitmap {
struct iova_bitmap_map mapped;
/* userspace address of the bitmap */
- u64 __user *bitmap;
+ u8 __user *bitmap;
/* u64 index that @mapped points to */
unsigned long mapped_base_index;
@@ -113,6 +113,9 @@ struct iova_bitmap {
/* length of the IOVA range for the whole bitmap */
size_t length;
+
+ /* length of the IOVA range set ahead the pinned pages */
+ unsigned long set_ahead_length;
};
/*
@@ -162,7 +165,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
{
struct iova_bitmap_map *mapped = &bitmap->mapped;
unsigned long npages;
- u64 __user *addr;
+ u8 __user *addr;
long ret;
/*
@@ -176,17 +179,18 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
sizeof(*bitmap->bitmap), PAGE_SIZE);
/*
- * We always cap at max number of 'struct page' a base page can fit.
- * This is, for example, on x86 means 2M of bitmap data max.
- */
- npages = min(npages, PAGE_SIZE / sizeof(struct page *));
-
- /*
* Bitmap address to be pinned is calculated via pointer arithmetic
* with bitmap u64 word index.
*/
addr = bitmap->bitmap + bitmap->mapped_base_index;
+ /*
+	 * We always cap at the max number of 'struct page' pointers a base
+	 * page can fit. On x86, for example, this means at most 2M of
+	 * bitmap data.
+ */
+ npages = min(npages + !!offset_in_page(addr),
+ PAGE_SIZE / sizeof(struct page *));
+
ret = pin_user_pages_fast((unsigned long)addr, npages,
FOLL_WRITE, mapped->pages);
if (ret <= 0)
@@ -247,7 +251,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
mapped = &bitmap->mapped;
mapped->pgshift = __ffs(page_size);
- bitmap->bitmap = data;
+ bitmap->bitmap = (u8 __user *)data;
bitmap->mapped_total_index =
iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
bitmap->iova = iova;
@@ -304,7 +308,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
remaining = min_t(unsigned long, remaining,
- bytes / sizeof(*bitmap->bitmap));
+ DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
return remaining;
}
@@ -341,6 +345,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap)
return bitmap->mapped_base_index >= bitmap->mapped_total_index;
}
+static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap,
+ size_t set_ahead_length)
+{
+ int ret = 0;
+
+ while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) {
+ unsigned long length = iova_bitmap_mapped_length(bitmap);
+ unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+
+ ret = iova_bitmap_get(bitmap);
+ if (ret)
+ break;
+
+ length = min(length, set_ahead_length);
+ iova_bitmap_set(bitmap, iova, length);
+
+ set_ahead_length -= length;
+ bitmap->mapped_base_index +=
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ iova_bitmap_put(bitmap);
+ }
+
+ bitmap->set_ahead_length = 0;
+ return ret;
+}
+
/*
* Advances to the next range, releases the current pinned
* pages and pins the next set of bitmap pages.
@@ -357,6 +387,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap)
if (iova_bitmap_done(bitmap))
return 0;
+	/* Iterate, set and skip any bits requested for the next iteration */
+ if (bitmap->set_ahead_length) {
+ int ret;
+
+ ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length);
+ if (ret)
+ return ret;
+ }
+
/* When advancing the index we pin the next set of bitmap pages */
return iova_bitmap_get(bitmap);
}
@@ -409,6 +448,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ unsigned long last_page_idx = mapped->npages - 1;
do {
unsigned int page_idx = cur_bit / BITS_PER_PAGE;
@@ -417,10 +457,18 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
last_bit - cur_bit + 1);
void *kaddr;
+ if (unlikely(page_idx > last_page_idx))
+ break;
+
kaddr = kmap_local_page(mapped->pages[page_idx]);
bitmap_set(kaddr, offset, nbits);
kunmap_local(kaddr);
cur_bit += nbits;
} while (cur_bit <= last_bit);
+
+ if (unlikely(cur_bit <= last_bit)) {
+ bitmap->set_ahead_length =
+ ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift);
+ }
}
EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD);
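The set_ahead_length bookkeeping above is plain arithmetic on bit positions: each bitmap bit stands for one IOVA page of 1 << pgshift bytes, so whatever could not be set inside the pinned window becomes a byte length replayed by iova_bitmap_set_ahead() on the next advance. A small worked example, assuming 4KiB IOVA pages:

    #include <stdio.h>

    int main(void)
    {
        /* 10 bits fell past the pinned window; with 4KiB IOVA pages
         * (pgshift == 12, an assumption) that is 40KiB of IOVA to
         * replay on the next iova_bitmap_advance(). */
        unsigned long cur_bit = 100, last_bit = 109;
        unsigned int pgshift = 12;
        unsigned long set_ahead_length = (last_bit - cur_bit + 1) << pgshift;

        printf("%lu\n", set_ahead_length); /* 40960 */
        return 0;
    }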
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index d9e9920c7eba..7a2199470f31 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -36,11 +36,12 @@ static struct mock_bus_type iommufd_mock_bus_type = {
},
};
-static atomic_t mock_dev_num;
+static DEFINE_IDA(mock_dev_ida);
enum {
MOCK_DIRTY_TRACK = 1,
MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
+ MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
/*
* Like a real page table alignment requires the low bits of the address
@@ -53,6 +54,7 @@ enum {
MOCK_PFN_START_IOVA = _MOCK_PFN_START,
MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
+ MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
/*
@@ -61,8 +63,8 @@ enum {
* In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
* value. This has a much smaller randomization space and syzkaller can hit it.
*/
-static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
- u64 *iova)
+static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
+ u64 *iova)
{
struct syz_layout {
__u32 nth_area;
@@ -86,6 +88,21 @@ static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
return 0;
}
+static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
+ u64 *iova)
+{
+ unsigned long ret;
+
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return 0;
+ }
+ ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
+ mutex_unlock(&access->ioas_lock);
+ return ret;
+}
+
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id, u64 *iova, u32 *flags)
{
@@ -98,7 +115,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
if (IS_ERR(ioas))
return;
- *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
+ *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
iommufd_put_object(ucmd->ictx, &ioas->obj);
}
@@ -121,6 +138,7 @@ enum selftest_obj_type {
struct mock_dev {
struct device dev;
unsigned long flags;
+ int id;
};
struct selftest_obj {
@@ -191,6 +209,34 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
+static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
+ unsigned long iova, size_t page_size,
+ unsigned long flags)
+{
+ unsigned long cur, end = iova + page_size - 1;
+ bool dirty = false;
+ void *ent, *old;
+
+ for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
+ ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+ if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
+ continue;
+
+ dirty = true;
+ /* Clear dirty */
+ if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
+ unsigned long val;
+
+ val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+ old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+ xa_mk_value(val), GFP_KERNEL);
+ WARN_ON_ONCE(ent != old);
+ }
+ }
+
+ return dirty;
+}
+
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
@@ -198,31 +244,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
{
struct mock_iommu_domain *mock =
container_of(domain, struct mock_iommu_domain, domain);
- unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
- void *ent, *old;
+ unsigned long end = iova + size;
+ void *ent;
if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
return -EINVAL;
- for (i = 0; i < max; i++) {
- unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+ do {
+ unsigned long pgsize = MOCK_IO_PAGE_SIZE;
+ unsigned long head;
- ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
- if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
- /* Clear dirty */
- if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
- unsigned long val;
-
- val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns,
- cur / MOCK_IO_PAGE_SIZE,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- }
- iommu_dirty_bitmap_record(dirty, cur,
- MOCK_IO_PAGE_SIZE);
+ ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+ if (!ent) {
+ iova += pgsize;
+ continue;
}
- }
+
+ if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
+ pgsize = MOCK_HUGE_PAGE_SIZE;
+ head = iova & ~(pgsize - 1);
+
+ /* Clear dirty */
+ if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
+ iommu_dirty_bitmap_record(dirty, head, pgsize);
+ iova = head + pgsize;
+ } while (iova < end);
return 0;
}
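The rewritten walk above rounds an IOVA down to its page head before testing and clearing, so a huge mapping is handled in one step. A worked example of the rounding, assuming 4KiB base pages (making MOCK_IO_PAGE_SIZE 2KiB and a mock huge page 512 * 2KiB = 1MiB):

    #include <stdio.h>

    int main(void)
    {
        unsigned long iova = 0x123456;
        unsigned long pgsize = 512 * 2048;       /* 1MiB mock huge page */
        unsigned long head = iova & ~(pgsize - 1);

        /* The whole huge page is tested, cleared and reported at once,
         * and the walk resumes at its end. */
        printf("head=0x%lx next=0x%lx\n", head, head + pgsize);
        return 0;
    }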
@@ -234,6 +280,7 @@ const struct iommu_dirty_ops dirty_ops = {
static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
+ struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
struct mock_iommu_domain *mock;
mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -242,6 +289,8 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
mock->domain.ops = mock_ops.default_domain_ops;
mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
xa_init(&mock->pfns);
@@ -287,7 +336,7 @@ mock_domain_alloc_user(struct device *dev, u32 flags,
return ERR_PTR(-EOPNOTSUPP);
if (user_data || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(NULL);
+ domain = mock_domain_alloc_paging(dev);
if (!domain)
return ERR_PTR(-ENOMEM);
if (has_dirty_flag)
@@ -350,6 +399,9 @@ static int mock_domain_map_pages(struct iommu_domain *domain,
if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
flags = MOCK_PFN_LAST_IOVA;
+ if (pgsize != MOCK_IO_PAGE_SIZE) {
+ flags |= MOCK_PFN_HUGE_IOVA;
+ }
old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
flags),
@@ -394,20 +446,27 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
/*
* iommufd generates unmaps that must be a strict
- * superset of the map's performend So every starting
- * IOVA should have been an iova passed to map, and the
+		 * superset of the maps performed. So every
+ * starting/ending IOVA should have been an iova passed
+ * to map.
*
- * First IOVA must be present and have been a first IOVA
- * passed to map_pages
+		 * This simple logic doesn't work when HUGE_PAGE is
+		 * turned on, since the core code will automatically
+		 * switch between the two page sizes, creating a break in
+		 * the unmap calls. The break can land in the middle of a
+		 * contiguous IOVA range.
*/
- if (first) {
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_START_IOVA));
- first = false;
+ if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
+ if (first) {
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_START_IOVA));
+ first = false;
+ }
+ if (pgcount == 1 &&
+ cur + MOCK_IO_PAGE_SIZE == pgsize)
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_LAST_IOVA));
}
- if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_LAST_IOVA));
iova += MOCK_IO_PAGE_SIZE;
ret += MOCK_IO_PAGE_SIZE;
@@ -595,7 +654,7 @@ static void mock_dev_release(struct device *dev)
{
struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
- atomic_dec(&mock_dev_num);
+ ida_free(&mock_dev_ida, mdev->id);
kfree(mdev);
}
@@ -604,7 +663,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
struct mock_dev *mdev;
int rc;
- if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY))
+ if (dev_flags &
+ ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
return ERR_PTR(-EINVAL);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
@@ -616,8 +676,12 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
mdev->dev.release = mock_dev_release;
mdev->dev.bus = &iommufd_mock_bus_type.bus;
- rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
- atomic_inc_return(&mock_dev_num));
+ rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err_put;
+ mdev->id = rc;
+
+ rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
if (rc)
goto err_put;
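Switching from the bare atomic counter to an IDA means device ids are recycled after release, so mock device names stay dense across create/destroy cycles instead of growing forever. A minimal sketch of the allocate/name/free pattern, with a hypothetical buffer-based helper in place of dev_set_name():

    #include <linux/idr.h>
    #include <linux/kernel.h>

    static DEFINE_IDA(dev_ida);

    /* Allocate the lowest free id and build the name from it. */
    static int name_dev(char *buf, size_t len)
    {
        int id = ida_alloc(&dev_ida, GFP_KERNEL);

        if (id < 0)
            return id;
        snprintf(buf, len, "iommufd_mock%u", id);
        return id;
    }

    /* Releasing the id lets the next create reuse the name. */
    static void free_dev_id(int id)
    {
        ida_free(&dev_ida, id);
    }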
@@ -1119,7 +1183,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
+ iova = iommufd_test_syz_conv_iova(staccess->access,
&cmd->access_pages.iova);
npages = (ALIGN(iova + length, PAGE_SIZE) -
@@ -1221,8 +1285,8 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
- &cmd->access_rw.iova);
+ iova = iommufd_test_syz_conv_iova(staccess->access,
+ &cmd->access_rw.iova);
rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
if (rc)
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 5559c943f03f..2b0b3175cea0 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -2,7 +2,7 @@
/*
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
generic_handle_domain_irq(b->domain, irq);
} while (status);
out:
+ /* Don't ack parent before all device writes are done */
+ wmb();
+
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d097001c1e3e..b822752c4261 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
}
+static bool rdists_support_shareable(void)
+{
+ return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
+}
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void)
break;
}
val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
- val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
- FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
- val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
- FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ if (rdists_support_shareable()) {
+ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ }
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
return val;
@@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void)
WARN_ON(!IS_ALIGNED(pa, psz));
val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
- val |= GICR_VPROPBASER_RaWb;
- val |= GICR_VPROPBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ }
val |= GICR_VPROPBASER_4_1_Z;
val |= GICR_VPROPBASER_4_1_VALID;
@@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void)
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ if (!rdists_support_shareable())
tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
@@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void)
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ if (!rdists_support_shareable())
tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
@@ -3172,6 +3181,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
+out:
if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@@ -3207,7 +3217,6 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
-out:
gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
smp_processor_id(),
@@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- int from, cpu = cpumask_first(mask_val);
+ struct cpumask common, *table_mask;
unsigned long flags;
+ int from, cpu;
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
* taken on any vLPI handling path that evaluates vpe->col_idx.
*/
from = vpe_to_cpuid_lock(vpe, &flags);
- if (from == cpu)
- goto out;
-
- vpe->col_idx = cpu;
+ table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
/*
- * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
- * is sharing its VPE table with the current one.
+ * If we are offered another CPU in the same GICv4.1 ITS
+ * affinity, pick this one. Otherwise, any CPU will do.
*/
- if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
- cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+ if (table_mask && cpumask_and(&common, mask_val, table_mask))
+ cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (from == cpu)
goto out;
+ vpe->col_idx = cpu;
+
its_send_vmovp(vpe);
its_vpe_db_proxy_move(vpe, from, cpu);
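The rewritten affinity logic prefers any CPU in the requested mask that already shares the source CPU's VPE table, and keeps 'from' itself when it qualifies, so the expensive VMOVP can be skipped entirely. A bitmask sketch of the same decision, with plain AND and count-trailing-zeros standing in for the cpumask helpers (the requested mask is assumed non-empty):

    /* Sketch of the CPU pick above; not the kernel cpumask code. */
    static int pick_cpu(unsigned long mask_val, unsigned long table_mask,
                        int from)
    {
        unsigned long common = mask_val & table_mask; /* cpumask_and() */

        if (common)
            return (common & (1UL << from)) ? from
                                            : __builtin_ctzl(common);
        return __builtin_ctzl(mask_val);              /* cpumask_first() */
    }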
@@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
GENMASK_ULL(51, 12);
val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
- val |= GICR_VPROPBASER_RaWb;
- val |= GICR_VPROPBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ }
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
- val |= GICR_VPENDBASER_RaWaWb;
- val |= GICR_VPENDBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPENDBASER_RaWaWb;
+ val |= GICR_VPENDBASER_InnerShareable;
+ }
/*
* There is no good way of finding out if the pending table is
* empty as we can race against the doorbell interrupt very
@@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its)
u32 ctlr;
int err;
+ its_enable_quirks(its);
+
if (is_v4(its)) {
if (!(its->typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(its);
@@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node)
if (!its)
return -ENOMEM;
- its_enable_quirks(its);
err = its_probe_one(its);
if (err) {
its_node_destroy(its);
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 1623cd779175..b3736bdd4b9f 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -241,7 +241,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
int ret;
unsigned int i, type;
unsigned long hwirq = 0;
- struct eiointc *priv = domain->host_data;
+ struct eiointc_priv *priv = domain->host_data;
ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
if (ret)
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 5101a3fb11df..58881d313979 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -235,22 +235,17 @@ static const struct irq_domain_ops mbigen_domain_ops = {
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
- struct device *parent;
struct platform_device *child;
struct irq_domain *domain;
struct device_node *np;
u32 num_pins;
int ret = 0;
- parent = bus_get_dev_root(&platform_bus_type);
- if (!parent)
- return -ENODEV;
-
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
- child = of_platform_device_create(np, NULL, parent);
+ child = of_platform_device_create(np, NULL, NULL);
if (!child) {
ret = -ENOMEM;
break;
@@ -273,7 +268,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
}
}
- put_device(parent);
if (ret)
of_node_put(np);
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index cda5838d2232..7942d8eb3d00 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -389,8 +389,8 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
/* Don't use devm_ioremap_resource, as we're accessing a shared region. */
priv->base = devm_ioremap(dev, res.start, resource_size(&res));
of_node_put(msgram_np);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ if (!priv->base)
+ return -ENOMEM;
} else {
/* Otherwise, fall back to simple MMIO. */
priv->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 5b7bc4fd9517..bf0b40b0fad4 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -148,7 +148,13 @@ static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ if (unlikely(irqd_irq_disabled(d))) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+ } else {
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ }
}
#ifdef CONFIG_SMP
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 095b9b49aa82..e6757a30dcca 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -22,6 +22,8 @@
#include "dm-ima.h"
#define DM_RESERVED_MAX_IOS 1024
+#define DM_MAX_TARGETS 1048576
+#define DM_MAX_TARGET_PARAMS 1024
struct dm_io;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 855b482cbff1..59445763e55a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -53,15 +53,17 @@
struct convert_context {
struct completion restart;
struct bio *bio_in;
- struct bio *bio_out;
struct bvec_iter iter_in;
+ struct bio *bio_out;
struct bvec_iter iter_out;
- u64 cc_sector;
atomic_t cc_pending;
+ u64 cc_sector;
union {
struct skcipher_request *req;
struct aead_request *req_aead;
} r;
+ bool aead_recheck;
+ bool aead_failed;
};
@@ -73,10 +75,8 @@ struct dm_crypt_io {
struct bio *base_bio;
u8 *integrity_metadata;
bool integrity_metadata_from_pool:1;
- bool in_tasklet:1;
struct work_struct work;
- struct tasklet_struct tasklet;
struct convert_context ctx;
@@ -84,6 +84,8 @@ struct dm_crypt_io {
blk_status_t error;
sector_t sector;
+ struct bvec_iter saved_bi_iter;
+
struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
@@ -1372,10 +1374,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
if (r == -EBADMSG) {
sector_t s = le64_to_cpu(*sector);
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
}
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
@@ -1759,10 +1764,11 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->base_bio = bio;
io->sector = sector;
io->error = 0;
+ io->ctx.aead_recheck = false;
+ io->ctx.aead_failed = false;
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
- io->in_tasklet = false;
atomic_set(&io->io_pending, 0);
}
@@ -1771,12 +1777,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
-static void kcryptd_io_bio_endio(struct work_struct *work)
-{
- struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
- bio_endio(io->base_bio);
-}
+static void kcryptd_queue_read(struct dm_crypt_io *io);
/*
* One of the bios was finished. Check for completion of
@@ -1791,6 +1792,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->io_pending))
return;
+ if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
+ cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
+ io->ctx.aead_recheck = true;
+ io->ctx.aead_failed = false;
+ io->error = 0;
+ kcryptd_queue_read(io);
+ return;
+ }
+
if (io->ctx.r.req)
crypt_free_req(cc, io->ctx.r.req, base_bio);
@@ -1801,20 +1811,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
base_bio->bi_status = error;
- /*
- * If we are running this function from our tasklet,
- * we can't call bio_endio() here, because it will call
- * clone_endio() from dm.c, which in turn will
- * free the current struct dm_crypt_io structure with
- * our tasklet. In this case we need to delay bio_endio()
- * execution to after the tasklet is done and dequeued.
- */
- if (io->in_tasklet) {
- INIT_WORK(&io->work, kcryptd_io_bio_endio);
- queue_work(cc->io_queue, &io->work);
- return;
- }
-
bio_endio(base_bio);
}
@@ -1840,15 +1836,19 @@ static void crypt_endio(struct bio *clone)
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned int rw = bio_data_dir(clone);
- blk_status_t error;
+ blk_status_t error = clone->bi_status;
+
+ if (io->ctx.aead_recheck && !error) {
+ kcryptd_queue_crypt(io);
+ return;
+ }
/*
* free the processed pages
*/
- if (rw == WRITE)
+ if (rw == WRITE || io->ctx.aead_recheck)
crypt_free_buffer_pages(cc, clone);
- error = clone->bi_status;
bio_put(clone);
if (rw == READ && !error) {
@@ -1869,6 +1869,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
struct crypt_config *cc = io->cc;
struct bio *clone;
+ if (io->ctx.aead_recheck) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ return 1;
+ crypt_inc_pending(io);
+ clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+ if (unlikely(!clone)) {
+ crypt_dec_pending(io);
+ return 1;
+ }
+ clone->bi_iter.bi_sector = cc->start + io->sector;
+ crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
+ io->saved_bi_iter = clone->bi_iter;
+ dm_submit_bio_remap(io->base_bio, clone);
+ return 0;
+ }
+
/*
* We need the original biovec array in order to decrypt the whole bio
* data *afterwards* -- thanks to immutable biovecs we don't need to
@@ -2095,6 +2111,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.bio_out = clone;
io->ctx.iter_out = clone->bi_iter;
+ if (crypt_integrity_aead(cc)) {
+ bio_copy_data(clone, io->base_bio);
+ io->ctx.bio_in = clone;
+ io->ctx.iter_in = clone->bi_iter;
+ }
+
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -2131,6 +2153,14 @@ dec:
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
+ if (io->ctx.aead_recheck) {
+ if (!io->error) {
+ io->ctx.bio_in->bi_iter = io->saved_bi_iter;
+ bio_copy_data(io->base_bio, io->ctx.bio_in);
+ }
+ crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
+ bio_put(io->ctx.bio_in);
+ }
crypt_dec_pending(io);
}
@@ -2160,11 +2190,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
- crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
- io->sector);
+ if (io->ctx.aead_recheck) {
+ io->ctx.cc_sector = io->sector + cc->iv_offset;
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ } else {
+ crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+ io->sector);
- r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ }
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
@@ -2206,10 +2242,13 @@ static void kcryptd_async_done(void *data, int error)
if (error == -EBADMSG) {
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
io->error = BLK_STS_IOERR;
@@ -2246,11 +2285,6 @@ static void kcryptd_crypt(struct work_struct *work)
kcryptd_crypt_write_convert(io);
}
-static void kcryptd_crypt_tasklet(unsigned long work)
-{
- kcryptd_crypt((struct work_struct *)work);
-}
-
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->cc;
@@ -2262,15 +2296,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
* irqs_disabled(): the kernel may run some IO completion from the idle thread, but
* it is being executed with irqs disabled.
*/
- if (in_hardirq() || irqs_disabled()) {
- io->in_tasklet = true;
- tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
- tasklet_schedule(&io->tasklet);
+ if (!(in_hardirq() || irqs_disabled())) {
+ kcryptd_crypt(&io->work);
return;
}
-
- kcryptd_crypt(&io->work);
- return;
}
INIT_WORK(&io->work, kcryptd_crypt);
@@ -3144,7 +3173,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
if (!strcasecmp(sval, "aead")) {
set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
- } else if (strcasecmp(sval, "none")) {
+ } else if (strcasecmp(sval, "none")) {
ti->error = "Unknown integrity profile";
return -EINVAL;
}
@@ -3673,7 +3702,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 24, 0},
+ .version = {1, 25, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c5f03aab4552..1fc901df84eb 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -278,6 +278,8 @@ struct dm_integrity_c {
atomic64_t number_of_mismatches;
+ mempool_t recheck_pool;
+
struct notifier_block reboot_notifier;
};
@@ -1689,6 +1691,77 @@ failed:
get_random_bytes(result, ic->tag_size);
}
+static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ struct dm_integrity_c *ic = dio->ic;
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ sector_t sector, logical_sector, area, offset;
+ struct page *page;
+ void *buffer;
+
+ get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
+ dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
+ &dio->metadata_offset);
+ sector = get_data_sector(ic, area, offset);
+ logical_sector = dio->range.logical_sector;
+
+ page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+ unsigned pos = 0;
+
+ do {
+ char *mem;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = sector;
+ io_loc.count = ic->sectors_per_block;
+
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+ r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
+ &dio->metadata_offset, ic->tag_size, TAG_CMP);
+ if (r) {
+ if (r > 0) {
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ bio->bi_bdev, logical_sector);
+ atomic64_inc(&ic->number_of_mismatches);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+ bio, logical_sector, 0);
+ r = -EILSEQ;
+ }
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ mem = bvec_kmap_local(&bv);
+ memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+ kunmap_local(mem);
+
+ pos += ic->sectors_per_block << SECTOR_SHIFT;
+ sector += ic->sectors_per_block;
+ logical_sector += ic->sectors_per_block;
+ } while (pos < bv.bv_len);
+ }
+free_ret:
+ mempool_free(page, &ic->recheck_pool);
+}
+
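integrity_recheck() draws its bounce page from a one-element mempool with GFP_NOIO, which guarantees the allocation eventually succeeds without recursing into the block layer; the dm-verity hunks later in this series set up an identical pool. A minimal sketch of the idiom using the same kernel APIs:

    #include <linux/mempool.h>
    #include <linux/mm.h>

    static mempool_t recheck_pool;

    /* One pre-allocated order-0 page, as in the ctr hunk above. */
    static int recheck_pool_init(void)
    {
        return mempool_init_page_pool(&recheck_pool, 1, 0);
    }

    static void recheck_one_block(void)
    {
        /* GFP_NOIO cannot recurse into the block layer, and the
         * reserved page guarantees this eventually succeeds. */
        struct page *page = mempool_alloc(&recheck_pool, GFP_NOIO);
        void *buffer = page_to_virt(page);

        /* ... synchronous dm_io() read into buffer, re-checksum ... */
        (void)buffer;

        mempool_free(page, &recheck_pool);
    }

The single reserved page serializes concurrent rechecks, which is an acceptable cost on what is already a rare error path.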
static void integrity_metadata(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
@@ -1776,15 +1849,8 @@ again:
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- sector_t s;
-
- s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
- DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
- bio->bi_bdev, s);
- r = -EILSEQ;
- atomic64_inc(&ic->number_of_mismatches);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
- bio, s, 0);
+ integrity_recheck(dio, checksums);
+ goto skip_io;
}
if (likely(checksums != checksums_onstack))
kfree(checksums);
@@ -4261,6 +4327,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
goto bad;
}
+ r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
+ if (r) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
if (!ic->metadata_wq) {
@@ -4609,6 +4681,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
if (ic->io)
dm_io_client_destroy(ic->io);
@@ -4661,7 +4734,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e65058e0ed06..3b1ad7127cb8 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1941,7 +1941,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
minimum_data_size - sizeof(param_kernel->version)))
return -EFAULT;
- if (param_kernel->data_size < minimum_data_size) {
+ if (unlikely(param_kernel->data_size < minimum_data_size) ||
+ unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) {
DMERR("Invalid data size in the ioctl structure: %u",
param_kernel->data_size);
return -EINVAL;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index bdc14ec99814..1e5d988f44da 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -66,6 +66,9 @@ struct dm_stats_last_position {
unsigned int last_rw;
};
+#define DM_STAT_MAX_ENTRIES 8388608
+#define DM_STAT_MAX_HISTOGRAM_ENTRIES 134217728
+
/*
* A typo on the command line could possibly make the kernel run out of memory
* and crash. To prevent the crash we account all used memory. We fail if we
@@ -285,6 +288,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
return -EOVERFLOW;
+ if (n_entries > DM_STAT_MAX_ENTRIES)
+ return -EOVERFLOW;
+
shared_alloc_size = struct_size(s, stat_shared, n_entries);
if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
return -EOVERFLOW;
@@ -297,6 +303,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
return -EOVERFLOW;
+ if ((n_histogram_entries + 1) * (size_t)n_entries > DM_STAT_MAX_HISTOGRAM_ENTRIES)
+ return -EOVERFLOW;
+
if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
return -ENOMEM;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 260b5b8f2b0d..41f1d731ae5a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -129,7 +129,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
int dm_table_create(struct dm_table **result, blk_mode_t mode,
unsigned int num_targets, struct mapped_device *md)
{
- struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct dm_table *t;
+
+ if (num_targets > DM_MAX_TARGETS)
+ return -EOVERFLOW;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
@@ -144,7 +149,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
if (!num_targets) {
kfree(t);
- return -ENOMEM;
+ return -EOVERFLOW;
}
if (alloc_targets(t, num_targets)) {
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 14e58ae70521..1b591bfa90d5 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -482,6 +482,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
return 0;
}
+static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+{
+ memcpy(data, io->recheck_buffer, len);
+ io->recheck_buffer += len;
+
+ return 0;
+}
+
+static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter start, sector_t cur_block)
+{
+ struct page *page;
+ void *buffer;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+
+ page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = v->io;
+ io_loc.bdev = v->data_dev->bdev;
+ io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
+ io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = verity_hash(v, verity_io_hash_req(v, io), buffer,
+ 1 << v->data_dev_block_bits,
+ verity_io_real_digest(v, io), true);
+ if (unlikely(r))
+ goto free_ret;
+
+ if (memcmp(verity_io_real_digest(v, io),
+ verity_io_want_digest(v, io), v->digest_size)) {
+ r = -EIO;
+ goto free_ret;
+ }
+
+ io->recheck_buffer = buffer;
+ r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = 0;
+free_ret:
+ mempool_free(page, &v->recheck_pool);
+
+ return r;
+}
+
static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len)
{
@@ -508,9 +565,7 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
-#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
-#endif
struct bvec_iter iter_copy;
struct bvec_iter *iter;
struct crypto_wait wait;
@@ -561,10 +616,7 @@ static int verity_verify_io(struct dm_verity_io *io)
if (unlikely(r < 0))
return r;
-#if defined(CONFIG_DM_VERITY_FEC)
- if (verity_fec_is_enabled(v))
- start = *iter;
-#endif
+ start = *iter;
r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -586,6 +638,10 @@ static int verity_verify_io(struct dm_verity_io *io)
* tasklet since it may sleep, so fallback to work-queue.
*/
return -EAGAIN;
+ } else if (verity_recheck(v, io, start, cur_block) == 0) {
+ if (v->validated_blocks)
+ set_bit(cur_block, v->validated_blocks);
+ continue;
#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0) {
@@ -645,23 +701,6 @@ static void verity_work(struct work_struct *w)
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
-static void verity_tasklet(unsigned long data)
-{
- struct dm_verity_io *io = (struct dm_verity_io *)data;
- int err;
-
- io->in_tasklet = true;
- err = verity_verify_io(io);
- if (err == -EAGAIN || err == -ENOMEM) {
- /* fallback to retrying with work-queue */
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
- return;
- }
-
- verity_finish_io(io, errno_to_blk_status(err));
-}
-
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -674,13 +713,8 @@ static void verity_end_io(struct bio *bio)
return;
}
- if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) {
- tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io);
- tasklet_schedule(&io->tasklet);
- } else {
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
- }
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
}
/*
@@ -963,6 +997,10 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
+ mempool_exit(&v->recheck_pool);
+ if (v->io)
+ dm_io_client_destroy(v->io);
+
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -1401,6 +1439,20 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->hash_blocks = hash_position;
+ r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
+ if (unlikely(r)) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
+ v->io = dm_io_client_create();
+ if (IS_ERR(v->io)) {
+ r = PTR_ERR(v->io);
+ v->io = NULL;
+ ti->error = "Cannot allocate dm io";
+ goto bad;
+ }
+
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
dm_bufio_alloc_callback, NULL,
@@ -1508,7 +1560,7 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index f9d522c870e6..db93a91169d5 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -11,6 +11,7 @@
#ifndef DM_VERITY_H
#define DM_VERITY_H
+#include <linux/dm-io.h>
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
#include <linux/interrupt.h>
@@ -68,6 +69,9 @@ struct dm_verity {
unsigned long *validated_blocks; /* bitset blocks validated */
char *signature_key_desc; /* signature keyring reference */
+
+ struct dm_io_client *io;
+ mempool_t recheck_pool;
};
struct dm_verity_io {
@@ -76,14 +80,15 @@ struct dm_verity_io {
/* original value of bio->bi_end_io */
bio_end_io_t *orig_bi_end_io;
+ struct bvec_iter iter;
+
sector_t block;
unsigned int n_blocks;
bool in_tasklet;
- struct bvec_iter iter;
-
struct work_struct work;
- struct tasklet_struct tasklet;
+
+ char *recheck_buffer;
/*
* Three variably-size fields follow this struct:
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 074cb785eafc..b463c28c39ad 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -299,7 +299,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
long i;
wc->memory_map = NULL;
- pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
+ pages = vmalloc_array(p, sizeof(struct page *));
if (!pages) {
r = -ENOMEM;
goto err2;
@@ -330,7 +330,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
r = -ENOMEM;
goto err3;
}
- kvfree(pages);
+ vfree(pages);
wc->memory_vmapped = true;
}
@@ -341,7 +341,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
return 0;
err3:
- kvfree(pages);
+ vfree(pages);
err2:
dax_read_unlock(id);
err1:
@@ -962,7 +962,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
if (wc->entries)
return 0;
- wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
+ wc->entries = vmalloc_array(wc->n_blocks, sizeof(struct wc_entry));
if (!wc->entries)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2266358d8074..9e41a9aaba8b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -579,8 +579,12 @@ static void submit_flushes(struct work_struct *ws)
rcu_read_lock();
}
rcu_read_unlock();
- if (atomic_dec_and_test(&mddev->flush_pending))
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pair is percpu_ref_get() from md_flush_request() */
+ percpu_ref_put(&mddev->active_io);
+
queue_work(md_wq, &mddev->flush_work);
+ }
}
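The comment added in submit_flushes() names a get/put pairing that spans two functions. Reduced to its reference-counting skeleton (an assumed simplification of md.c, not a verbatim excerpt):

/* md_flush_request() takes one active_io reference for the whole
 * flush; whichever path drops flush_pending to zero must release it
 * before handing off to the flush work. */
static void flush_done_sketch(struct mddev *mddev)
{
	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* the pair of percpu_ref_get() in md_flush_request() */
		percpu_ref_put(&mddev->active_io);
		queue_work(md_wq, &mddev->flush_work);
	}
}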
static void md_submit_flush_data(struct work_struct *ws)
@@ -8788,12 +8792,16 @@ void md_do_sync(struct md_thread *thread)
int ret;
/* just in case thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
- test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
- if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ goto skip;
+
+ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+ !md_is_rdwr(mddev)) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- return;
+ goto skip;
}
if (mddev_is_clustered(mddev)) {
@@ -9368,13 +9376,19 @@ static void md_start_sync(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, sync_work);
int spares = 0;
bool suspend = false;
+ char *name;
- if (md_spares_need_change(mddev))
+ /*
+ * If reshape is still in progress, spares won't be added or removed
+ * from conf until reshape is done.
+ */
+ if (mddev->reshape_position == MaxSector &&
+ md_spares_need_change(mddev)) {
suspend = true;
+ mddev_suspend(mddev, false);
+ }
- suspend ? mddev_suspend_and_lock_nointr(mddev) :
- mddev_lock_nointr(mddev);
-
+ mddev_lock_nointr(mddev);
if (!md_is_rdwr(mddev)) {
/*
* On a read-only array we can:
@@ -9400,8 +9414,10 @@ static void md_start_sync(struct work_struct *ws)
if (spares)
md_bitmap_write_all(mddev->bitmap);
+ name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
+ "reshape" : "resync";
rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "resync"));
+ md_register_thread(md_do_sync, mddev, name));
if (!mddev->sync_thread) {
pr_warn("%s: could not start resync thread...\n",
mdname(mddev));
@@ -9445,6 +9461,20 @@ not_running:
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
+static void unregister_sync_thread(struct mddev *mddev)
+{
+ if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
+ /* resync/recovery still happening */
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ return;
+ }
+
+ if (WARN_ON_ONCE(!mddev->sync_thread))
+ return;
+
+ md_reap_sync_thread(mddev);
+}
+
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
@@ -9469,9 +9499,6 @@ not_running:
*/
void md_check_recovery(struct mddev *mddev)
{
- if (READ_ONCE(mddev->suspended))
- return;
-
if (mddev->bitmap)
md_bitmap_daemon_work(mddev);
@@ -9485,7 +9512,8 @@ void md_check_recovery(struct mddev *mddev)
}
if (!md_is_rdwr(mddev) &&
- !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if ( ! (
(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
@@ -9507,8 +9535,7 @@ void md_check_recovery(struct mddev *mddev)
struct md_rdev *rdev;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- /* sync_work already queued. */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ unregister_sync_thread(mddev);
goto unlock;
}
@@ -9571,16 +9598,7 @@ void md_check_recovery(struct mddev *mddev)
* still set.
*/
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
- /* resync/recovery still happening */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- goto unlock;
- }
-
- if (WARN_ON_ONCE(!mddev->sync_thread))
- goto unlock;
-
- md_reap_sync_thread(mddev);
+ unregister_sync_thread(mddev);
goto unlock;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 24f0d799fd98..286f8b16c7bd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2262,7 +2262,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
int sectors = r1_bio->sectors;
int read_disk = r1_bio->read_disk;
struct mddev *mddev = conf->mddev;
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[read_disk].rdev);
+ struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
if (exceed_read_errors(mddev, rdev)) {
r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7412066ea22c..a5f8419e2df1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4175,11 +4175,7 @@ static int raid10_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto out_free_conf;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
return 0;
@@ -4573,16 +4569,8 @@ out:
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- ret = -EAGAIN;
- goto abort;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8497880135ee..6a7a32f7fb91 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7936,11 +7936,7 @@ static int raid5_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto abort;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
/* Ok, everything is just fine now */
@@ -8506,29 +8502,8 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- mddev->recovery = 0;
- spin_lock_irq(&conf->device_lock);
- write_seqcount_begin(&conf->gen_lock);
- mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
- mddev->new_chunk_sectors =
- conf->chunk_sectors = conf->prev_chunk_sectors;
- mddev->new_layout = conf->algorithm = conf->prev_algo;
- rdev_for_each(rdev, mddev)
- rdev->new_data_offset = rdev->data_offset;
- smp_wmb();
- conf->generation --;
- conf->reshape_progress = MaxSector;
- mddev->reshape_position = MaxSector;
- write_seqcount_end(&conf->gen_lock);
- spin_unlock_irq(&conf->device_lock);
- return -EAGAIN;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
}
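After these hunks, raid10 and raid5 converge on the same shape: instead of registering the reshape thread inline (and open-coding the error rollback), they raise the recovery bits and let the md daemon register the sync thread via md_check_recovery() -> md_start_sync(), which now also picks the "reshape" name. A condensed sketch; start_reshape_sketch() is an illustrative name:

static void start_reshape_sketch(struct mddev *mddev)
{
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	/* the daemon thread runs md_check_recovery(), which queues
	 * md_start_sync() and registers the "reshape" thread for us */
	md_wakeup_thread(mddev->thread);
}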
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 41a832dd1426..b6bf8f232f48 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -989,7 +989,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
bool no_previous_buffers = !q_num_bufs;
int ret = 0;
- if (q->num_buffers == q->max_num_buffers) {
+ if (q_num_bufs == q->max_num_buffers) {
dprintk(q, 1, "maximum number of buffers already allocated\n");
return -ENOBUFS;
}
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 54d572c3b515..c575198e8354 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -671,8 +671,20 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
}
EXPORT_SYMBOL(vb2_querybuf);
-static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
+ u32 *flags, u32 *caps, u32 *max_num_bufs)
{
+ if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+ /*
+ * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+ * but in order to avoid bugs we zero out all bits.
+ */
+ *flags = 0;
+ } else {
+ /* Clear all unknown flags. */
+ *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ }
+
*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
if (q->io_modes & VB2_MMAP)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
@@ -686,21 +698,9 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
-}
-
-static void validate_memory_flags(struct vb2_queue *q,
- int memory,
- u32 *flags)
-{
- if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
- /*
- * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
- * but in order to avoid bugs we zero out all bits.
- */
- *flags = 0;
- } else {
- /* Clear all unknown flags. */
- *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ if (max_num_bufs) {
+ *max_num_bufs = q->max_num_buffers;
+ *caps |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
}
}
@@ -709,8 +709,8 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
int ret = vb2_verify_memory_type(q, req->memory, req->type);
u32 flags = req->flags;
- fill_buf_caps(q, &req->capabilities);
- validate_memory_flags(q, req->memory, &flags);
+ vb2_set_flags_and_caps(q, req->memory, &flags,
+ &req->capabilities, NULL);
req->flags = flags;
return ret ? ret : vb2_core_reqbufs(q, req->memory,
req->flags, &req->count);
@@ -751,11 +751,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
int ret = vb2_verify_memory_type(q, create->memory, f->type);
unsigned i;
- fill_buf_caps(q, &create->capabilities);
- validate_memory_flags(q, create->memory, &create->flags);
create->index = vb2_get_num_buffers(q);
- create->max_num_buffers = q->max_num_buffers;
- create->capabilities |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
+ vb2_set_flags_and_caps(q, create->memory, &create->flags,
+ &create->capabilities, &create->max_num_buffers);
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -1006,8 +1004,8 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
u32 flags = p->flags;
- fill_buf_caps(vdev->queue, &p->capabilities);
- validate_memory_flags(vdev->queue, p->memory, &flags);
+ vb2_set_flags_and_caps(vdev->queue, p->memory, &flags,
+ &p->capabilities, NULL);
p->flags = flags;
if (res)
return res;
@@ -1026,12 +1024,11 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *p)
{
struct video_device *vdev = video_devdata(file);
- int res = vb2_verify_memory_type(vdev->queue, p->memory,
- p->format.type);
+ int res = vb2_verify_memory_type(vdev->queue, p->memory, p->format.type);
- p->index = vdev->queue->num_buffers;
- fill_buf_caps(vdev->queue, &p->capabilities);
- validate_memory_flags(vdev->queue, p->memory, &p->flags);
+ p->index = vb2_get_num_buffers(vdev->queue);
+ vb2_set_flags_and_caps(vdev->queue, p->memory, &p->flags,
+ &p->capabilities, &p->max_num_buffers);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
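The consolidation above folds fill_buf_caps() and validate_memory_flags() into a single helper. A usage sketch of its contract, simplified from the REQBUFS path above:

u32 flags = req->flags;

/* One call validates the memory flags, fills the capability mask and,
 * when max_num_bufs is non-NULL (the CREATE_BUFS paths), also reports
 * the queue ceiling plus V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS.
 * REQBUFS passes NULL and skips that part. */
vb2_set_flags_and_caps(q, req->memory, &flags, &req->capabilities, NULL);
req->flags = flags;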
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index bfe4caa79cc9..0d90b5820bef 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -272,7 +272,7 @@ static const struct wave5_match_data ti_wave521c_data = {
};
static const struct of_device_id wave5_dt_ids[] = {
- { .compatible = "ti,k3-j721s2-wave521c", .data = &ti_wave521c_data },
+ { .compatible = "ti,j721s2-wave521c", .data = &ti_wave521c_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, wave5_dt_ids);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index aebd3c12020b..c381c22135a2 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -725,6 +725,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
unsigned int i;
u32 status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 4b6b28c05b89..b757f75edecf 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -450,6 +450,7 @@ struct rkisp1_debug {
* @debug: debug params to be exposed on debugfs
* @info: version-specific ISP information
* @irqs: IRQ line numbers
+ * @irqs_enabled: the hardware is enabled and can cause interrupts
*/
struct rkisp1_device {
void __iomem *base_addr;
@@ -471,6 +472,7 @@ struct rkisp1_device {
struct rkisp1_debug debug;
const struct rkisp1_info *info;
int irqs[RKISP1_NUM_IRQS];
+ bool irqs_enabled;
};
/*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index b6e47e2f1b94..4202642e0523 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -196,6 +196,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index f96f821a7b50..73cf08a74011 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
{
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ rkisp1->irqs_enabled = false;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
+ /*
+ * Wait until any running IRQ handler has returned. The IRQ handler
+ * may get called even after this (as it's a shared interrupt line)
+ * but the 'irqs_enabled' flag will make the handler return immediately.
+ */
+ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
+ if (rkisp1->irqs[il] == -1)
+ continue;
+
+ /* Skip if the irq line is the same as previous */
+ if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il])
+ synchronize_irq(rkisp1->irqs[il]);
+ }
+
clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
return pinctrl_pm_select_sleep_state(dev);
}
@@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
if (ret)
return ret;
+ rkisp1->irqs_enabled = true;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
return 0;
}
@@ -559,7 +581,7 @@ static int rkisp1_probe(struct platform_device *pdev)
rkisp1->irqs[il] = irq;
}
- ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0,
+ ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
dev_driver_string(dev), dev);
if (ret) {
dev_err(dev, "request irq failed: %d\n", ret);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index f00873d31c42..78a1f7a1499b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -976,6 +976,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
return IRQ_NONE;
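Taken together, the rkisp1 hunks implement one pattern for sharing an interrupt line across runtime suspend: a software gate checked first in every ISR, plus synchronize_irq() on the suspend side. A minimal sketch of the suspend half (rkisp1_quiesce_sketch() is an illustrative name):

static void rkisp1_quiesce_sketch(struct rkisp1_device *rkisp1, int irq)
{
	/* gate the ISRs; they return IRQ_NONE once this is false,
	 * which is what makes IRQF_SHARED registration safe */
	rkisp1->irqs_enabled = false;
	mb();			/* order the store against the ISR reads */
	synchronize_irq(irq);	/* wait out any in-flight handler */
	/* only now is it safe to disable clocks */
}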
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 2afe67ffa285..74d69ce22a33 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -319,6 +319,7 @@ config IR_PWM_TX
tristate "PWM IR transmitter"
depends on LIRC
depends on PWM
+ depends on HIGH_RES_TIMERS
depends on OF
help
Say Y if you want to use a PWM based IR transmitter. This is
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index fe17c7f98e81..52d82cbe7685 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
if (attr->attach_flags)
return -EINVAL;
- rcdev = rc_dev_get_from_fd(attr->target_fd);
+ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
@@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
- rcdev = rc_dev_get_from_fd(attr->target_fd);
+ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
if (IS_ERR(rcdev)) {
bpf_prog_put(prog);
return PTR_ERR(rcdev);
@@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
if (attr->query.query_flags)
return -EINVAL;
- rcdev = rc_dev_get_from_fd(attr->query.target_fd);
+ rcdev = rc_dev_get_from_fd(attr->query.target_fd, false);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 196806709259..69e630d85262 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
if (err) {
dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+ kfree(buf);
return err;
}
@@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
if (err) {
dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+ kfree(buf);
return err;
}
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index a537734832c5..caad59f76793 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -814,7 +814,7 @@ void __exit lirc_dev_exit(void)
unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
}
-struct rc_dev *rc_dev_get_from_fd(int fd)
+struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
{
struct fd f = fdget(fd);
struct lirc_fh *fh;
@@ -828,6 +828,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd)
return ERR_PTR(-EINVAL);
}
+ if (write && !(f.file->f_mode & FMODE_WRITE))
+ return ERR_PTR(-EPERM);
+
fh = f.file->private_data;
dev = fh->rc;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index ef1e95e1af7f..7df949fc65e2 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
int lirc_register(struct rc_dev *dev);
void lirc_unregister(struct rc_dev *dev);
-struct rc_dev *rc_dev_get_from_fd(int fd);
+struct rc_dev *rc_dev_get_from_fd(int fd, bool write);
#else
static inline int lirc_dev_init(void) { return 0; }
static inline void lirc_dev_exit(void) {}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 1c6c62a7f7f5..03319a1fa97f 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2191,7 +2191,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
int i;
spin_lock_irqsave(&cctx->lock, flags);
- for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
+ for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) {
if (cctx->session[i].sid == sess->sid) {
cctx->session[i].valid = false;
cctx->sesscount--;
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
index 8aea2d070a40..d279a4f195e2 100644
--- a/drivers/misc/open-dice.c
+++ b/drivers/misc/open-dice.c
@@ -140,7 +140,6 @@ static int __init open_dice_probe(struct platform_device *pdev)
return -ENOMEM;
*drvdata = (struct open_dice_drvdata){
- .lock = __MUTEX_INITIALIZER(drvdata->lock),
.rmem = rmem,
.misc = (struct miscdevice){
.parent = dev,
@@ -150,6 +149,7 @@ static int __init open_dice_probe(struct platform_device *pdev)
.mode = 0600,
},
};
+ mutex_init(&drvdata->lock);
/* Index overflow check not needed, misc_register() will fail. */
snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
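The open-dice change encodes a general rule: __MUTEX_INITIALIZER is only valid for locks with static storage duration, because lockdep needs a static lock-class key; a lock embedded in dynamically allocated memory must go through mutex_init(). Sketch, with struct foo as a hypothetical container:

static DEFINE_MUTEX(static_lock);	/* static storage: initializer OK */

struct foo {
	struct mutex lock;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		mutex_init(&f->lock);	/* dynamic: must use mutex_init() */
	return f;
}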
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f410bee50132..58ed7193a3ca 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1015,10 +1015,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a2d949a9344..39f45c2b6de8 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -75,11 +75,15 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_irq);
int mmc_gpio_get_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
+ int cansleep;
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- return gpiod_get_value_cansleep(ctx->ro_gpio);
+ cansleep = gpiod_cansleep(ctx->ro_gpio);
+ return cansleep ?
+ gpiod_get_value_cansleep(ctx->ro_gpio) :
+ gpiod_get_value(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
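The slot-gpio fix dispatches on whether the descriptor may sleep, so callers in atomic context are no longer funneled through the cansleep accessor. The reusable shape:

/* gpiod_get_value() is for fast, register-backed GPIOs and is safe in
 * atomic context; gpiod_get_value_cansleep() is for slow ones (e.g.
 * behind an I2C expander) and may only run where sleeping is allowed. */
static int read_gpio(struct gpio_desc *desc)
{
	return gpiod_cansleep(desc) ? gpiod_get_value_cansleep(desc)
				    : gpiod_get_value(desc);
}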
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 35067e1e6cd8..f5da7f9baa52 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
+ host->dma_in_progress = true;
+
if (!host->variant->dma_lli || data->sg_len == 1 ||
idma->use_bounce_buffer) {
u32 dma_addr;
@@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
+static void sdmmc_idma_error(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
+ data->host_cookie = 0;
+
+ if (!idma->use_bounce_buffer)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+}
+
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
+ if (!dma_inprogress(host))
+ return;
+
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
+ .dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 7bfee28116af..d4a02184784a 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -693,6 +693,35 @@ static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *io
return 0;
}
+static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ u32 scratch_32 = 0;
+ u8 scratch_8 = 0;
+
+ chip = slot->chip;
+
+ if (mode == MMC_POWER_OFF) {
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+ /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= ~(O2_SD_SEL_DLL);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ }
+
+ sdhci_set_power(host, mode, vdd);
+}
+
static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
@@ -1051,6 +1080,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_pci_o2_set_power,
};
const struct sdhci_pci_fixes sdhci_o2 = {
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 8cf3a375de65..cc9d28b75eb9 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
+
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+ u32 reg;
+ int err;
+
+ err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+ 1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+ if (err)
+ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+
+ return err;
+}
+
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ int ret = xenon_check_stability_internal_clk(host);
+
+ if (ret)
+ return ret;
+
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
- /* wait for host eMMC PHY init completes */
- udelay(wait);
- reg = sdhci_readl(host, phy_regs->timing_adj);
- reg &= XENON_PHY_INITIALIZAION;
- if (reg) {
+ /*
+ * AC5X spec says bit must be polled until zero.
+ * We see cases in which timeout can take longer
+ * than the standard calculation on AC5X, which is
+ * expected following the spec comment above.
+ * According to the spec, we must wait as long as
+ * it takes for that bit to toggle on AC5X.
+ * Cap that with 100 delay loops so we won't get
+ * stuck here forever:
+ */
+
+ ret = read_poll_timeout(sdhci_readl, reg,
+ !(reg & XENON_PHY_INITIALIZAION),
+ wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+ false, host, phy_regs->timing_adj);
+ if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
- wait);
- return -ETIMEDOUT;
- }
+ wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
- return 0;
+ return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1
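read_poll_timeout()'s argument order is easy to misread, so here is the xenon conversion again, annotated parameter by parameter:

ret = read_poll_timeout(sdhci_readl,		/* op: how to read */
		reg,				/* val: each read lands here */
		!(reg & XENON_PHY_INITIALIZAION), /* cond: when to stop */
		wait,				/* sleep between reads, us */
		XENON_MAX_PHY_TIMEOUT_LOOPS * wait, /* total timeout, us */
		false,				/* no sleep before first read */
		host, phy_regs->timing_adj);	/* args passed to op */
/* returns 0 once cond holds, -ETIMEDOUT if the budget runs out */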
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e451b28840d5..5887feb347a4 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -621,6 +621,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
mtd_set_of_node(mtd, mtd_dn);
+ of_node_put(mtd_dn);
break;
}
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index a46698744850..5b0f5a9cef81 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30),
- MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30),
- MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 2048, 512, 16, 4, 4, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
- MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
- MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30),
- MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 4096, 512, 8, 4, 4, 1024, 0, 30, 0, 64, 30),
+ MARVELL_LAYOUT( 4096, 512, 16, 8, 8, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
- MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
- MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30),
- MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 8192, 512, 8, 8, 8, 1024, 0, 30, 0, 160, 30),
+ MARVELL_LAYOUT( 8192, 512, 16, 16, 16, 512, 0, 30, 0, 32, 30),
};
/**
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 987710e09441..6023cba748bb 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
* report the maximum of 4 in this case
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
* 1 ... 4 bits are flipped (and corrected)
*/
/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
case STATUS_ECC_UNCOR_ERROR:
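Both gigadevice hunks fix the same bug class: the data phase of a spi-mem operation may be DMA-mapped, so it must not point at an on-stack variable. The resulting pattern, assuming the surrounding spinand code:

struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
					      spinand->scratchbuf);
int ret = spi_mem_exec_op(spinand->spimem, &op);

if (ret)
	return ret;
/* copy out only after the transfer: scratchbuf is DMA-safe, the
 * on-stack status2 is not */
status2 = *spinand->scratchbuf;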
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 8c651fdee039..57f1729066f2 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -186,4 +186,5 @@ static void __exit arcnet_raw_exit(void)
module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);
+MODULE_DESCRIPTION("ARCnet raw mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 8c3ccc7c83cd..53d10a04d1bd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -312,6 +312,7 @@ module_param(node, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index c09b567845e1..7a0a79973769 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -265,4 +265,5 @@ static void __exit capmode_module_exit(void)
module_init(capmode_module_init);
module_exit(capmode_module_exit);
+MODULE_DESCRIPTION("ARCnet CAP mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 7b5c8bb02f11..c5e571ec94c9 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -61,6 +61,7 @@ module_param(timeout, int, 0);
module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver");
MODULE_LICENSE("GPL");
static void led_tx_set(struct led_classdev *led_cdev,
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 06e1651b594b..a0053e3992a3 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -399,6 +399,7 @@ EXPORT_SYMBOL(com20020_found);
EXPORT_SYMBOL(com20020_netdev_ops);
#endif
+MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver");
MODULE_LICENSE("GPL");
#ifdef MODULE
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index dc3253b318da..75f08aa7528b 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -97,6 +97,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver");
MODULE_LICENSE("GPL");
/*====================================================================*/
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 37b47749fc8b..3b463fbc6402 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -350,6 +350,7 @@ static char device[9]; /* use eg. device=arc1 to change name */
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver");
MODULE_LICENSE("GPL");
#ifndef MODULE
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index f49dae194284..b3b287c16561 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -645,6 +645,7 @@ static void com90xx_copy_from_card(struct net_device *dev, int bufnum,
TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
}
+MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver");
MODULE_LICENSE("GPL");
static int __init com90xx_init(void)
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index a7752a5b647f..46519ca63a0a 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -78,6 +78,7 @@ static void __exit arcnet_rfc1051_exit(void)
module_init(arcnet_rfc1051_init);
module_exit(arcnet_rfc1051_exit);
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module");
MODULE_LICENSE("GPL");
/* Determine a packet's protocol ID.
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index a4c856282674..0edf35d971c5 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -35,6 +35,7 @@
#include "arcdevice.h"
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module");
MODULE_LICENSE("GPL");
static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4e0600c7b050..a11748b8d69b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1819,6 +1819,8 @@ void bond_xdp_set_features(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter)
val &= slave->dev->xdp_features;
+ val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
xdp_set_features_flag(bond_dev, val);
}
@@ -5909,9 +5911,6 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
-
- if (bond_xdp_check(bond))
- bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
}
/* Destroy a bonding device.
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 036d85ef07f5..dfdc039d92a6 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -346,7 +346,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
- can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
+ can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
&priv->ctrlmode, priv->ctrlmode_supported);
} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
index 237066d30704..14ca42491512 100644
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ b/drivers/net/dsa/dsa_loop_bdinfo.c
@@ -32,4 +32,5 @@ static int __init dsa_loop_bdinfo_init(void)
}
arch_initcall(dsa_loop_bdinfo_init)
+MODULE_DESCRIPTION("DSA mock-up switch driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 391c4dbdff42..3c1f657593a8 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2838,8 +2838,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
/* MT753x MAC works in 1G full duplex mode for all up-clocked
* variants.
*/
- if (interface == PHY_INTERFACE_MODE_INTERNAL ||
- interface == PHY_INTERFACE_MODE_TRGMII ||
+ if (interface == PHY_INTERFACE_MODE_TRGMII ||
(phy_interface_mode_is_8023z(interface))) {
speed = SPEED_1000;
duplex = DUPLEX_FULL;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 383b3c4d6f59..614cabb5c1b0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3659,7 +3659,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
int err;
if (!chip->info->ops->phy_read_c45)
- return -EOPNOTSUPP;
+ return 0xffff;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index c51f40960961..7a864329cb72 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -2051,12 +2051,11 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
- GPIOD_ASIS);
+ GPIOD_OUT_HIGH);
if (IS_ERR(priv->reset_gpio))
return PTR_ERR(priv->reset_gpio);
if (priv->reset_gpio) {
- gpiod_set_value_cansleep(priv->reset_gpio, 1);
/* The active low duration must be greater than 10 ms
* and checkpatch.pl wants 20 ms.
*/
diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c
index 0e0aa4016858..c5636245f1ca 100644
--- a/drivers/net/ethernet/8390/8390.c
+++ b/drivers/net/ethernet/8390/8390.c
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c
index 6834742057b3..6d429b11e9c6 100644
--- a/drivers/net/ethernet/8390/8390p.c
+++ b/drivers/net/ethernet/8390/8390p.c
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
module_init(NS8390p_init_module);
module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index a09f383dd249..828edca8d30c 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -610,4 +610,5 @@ static int init_pcmcia(void)
return 1;
}
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 24f49a8ff903..fd9dcdc356e6 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index 265976e3b64a..6cc0e190aa79 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
module_init(stnic_probe);
module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index d70390e9d03d..c24dd4fe7a10 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index da3bdd302502..760a9a60bc15 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -21,6 +21,7 @@ config ADIN1110
tristate "Analog Devices ADIN1110 MAC-PHY"
depends on SPI && NET_SWITCHDEV
select CRC8
+ select PHYLIB
help
Say yes here to build support for Analog Devices ADIN1110
Low Power 10BASE-T1L Ethernet MAC-PHY.
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index 5beadabc2136..ea773cfa0af6 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
return nq_work;
}
+static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc)
+{
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) ||
+ pdsc->state & BIT_ULL(PDSC_S_FW_DEAD))
+ return false;
+
+ return refcount_inc_not_zero(&pdsc->adminq_refcnt);
+}
+
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
union pds_core_adminq_comp *comp;
@@ -75,9 +84,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
int aq_work = 0;
int credits;
- /* Don't process AdminQ when shutting down */
- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
__func__);
return;
}
@@ -124,6 +133,7 @@ credits:
pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
credits,
PDS_CORE_INTR_CRED_REARM);
+ refcount_dec(&pdsc->adminq_refcnt);
}
void pdsc_work_thread(struct work_struct *work)
@@ -135,18 +145,20 @@ void pdsc_work_thread(struct work_struct *work)
irqreturn_t pdsc_adminq_isr(int irq, void *data)
{
- struct pdsc_qcq *qcq = data;
- struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc *pdsc = data;
+ struct pdsc_qcq *qcq;
- /* Don't process AdminQ when shutting down */
- if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
- dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ /* Don't process AdminQ when it's not up */
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_err(pdsc->dev, "%s: called while adminq is unavailable\n",
__func__);
return IRQ_HANDLED;
}
+ qcq = &pdsc->adminqcq;
queue_work(pdsc->wq, &qcq->work);
pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
+ refcount_dec(&pdsc->adminq_refcnt);
return IRQ_HANDLED;
}
@@ -179,10 +191,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Check that the FW is running */
if (!pdsc_is_fw_running(pdsc)) {
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
- dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
- __func__, fw_status);
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_info(pdsc->dev, "%s: post failed - BARs not setup\n",
+ __func__);
+ }
ret = -ENXIO;
goto err_out_unlock;
@@ -230,6 +248,12 @@ int pdsc_adminq_post(struct pdsc *pdsc,
int err = 0;
int index;
+ if (!pdsc_adminq_inc_if_up(pdsc)) {
+ dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n",
+ __func__, cmd->opcode);
+ return -ENXIO;
+ }
+
wc.qcq = &pdsc->adminqcq;
index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
if (index < 0) {
@@ -248,10 +272,16 @@ int pdsc_adminq_post(struct pdsc *pdsc,
break;
if (!pdsc_is_fw_running(pdsc)) {
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
-
- dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
- __func__, fw_status);
+ if (pdsc->info_regs) {
+ u8 fw_status =
+ ioread8(&pdsc->info_regs->fw_status);
+
+ dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ } else {
+ dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n",
+ __func__);
+ }
err = -ENXIO;
break;
}
@@ -285,6 +315,8 @@ err_out:
queue_work(pdsc->wq, &pdsc->health_work);
}
+ refcount_dec(&pdsc->adminq_refcnt);
+
return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 0d2091e9eb28..7658a7286767 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -125,7 +125,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
snprintf(name, sizeof(name), "%s-%d-%s",
PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
- index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
+ index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
if (index < 0)
return index;
qcq->intx = index;
@@ -404,10 +404,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
int numdescs;
int err;
- if (init)
- err = pdsc_dev_init(pdsc);
- else
- err = pdsc_dev_reinit(pdsc);
+ err = pdsc_dev_init(pdsc);
if (err)
return err;
@@ -450,6 +447,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
pdsc_debugfs_add_viftype(pdsc);
}
+ refcount_set(&pdsc->adminq_refcnt, 1);
clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
return 0;
@@ -464,6 +462,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
if (!pdsc->pdev->is_virtfn)
pdsc_devcmd_reset(pdsc);
+ if (pdsc->adminqcq.work.func)
+ cancel_work_sync(&pdsc->adminqcq.work);
pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
pdsc_qcq_free(pdsc, &pdsc->adminqcq);
@@ -476,10 +476,9 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
for (i = 0; i < pdsc->nintrs; i++)
pdsc_intr_free(pdsc, i);
- if (removing) {
- kfree(pdsc->intr_info);
- pdsc->intr_info = NULL;
- }
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ pdsc->nintrs = 0;
}
if (pdsc->kern_dbpage) {
@@ -487,6 +486,7 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
pdsc->kern_dbpage = NULL;
}
+ pci_free_irq_vectors(pdsc->pdev);
set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
@@ -512,6 +512,24 @@ void pdsc_stop(struct pdsc *pdsc)
PDS_CORE_INTR_MASK_SET);
}
+static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
+{
+ /* The driver initializes the adminq_refcnt to 1 when the adminq is
+ * allocated and ready for use. Other users/requesters will increment
+ * the refcnt while in use. If the refcnt is down to 1 then the adminq
+ * is not in use and the refcnt can be cleared and adminq freed. Before
+ * calling this function the driver will set PDSC_S_FW_DEAD, which
+ * causes subsequent attempts to use the adminq and increment the
+ * refcnt to fail. This guarantees that this function will eventually
+ * exit.
+ */
+ while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
+ dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
+ __func__);
+ cpu_relax();
+ }
+}
+
void pdsc_fw_down(struct pdsc *pdsc)
{
union pds_core_notifyq_comp reset_event = {
@@ -527,6 +545,8 @@ void pdsc_fw_down(struct pdsc *pdsc)
if (pdsc->pdev->is_virtfn)
return;
+ pdsc_adminq_wait_and_dec_once_unused(pdsc);
+
/* Notify clients of fw_down */
if (pdsc->fw_reporter)
devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
@@ -577,7 +597,13 @@ err_out:
static void pdsc_check_pci_health(struct pdsc *pdsc)
{
- u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+ u8 fw_status;
+
+ /* some sort of teardown already in progress */
+ if (!pdsc->info_regs)
+ return;
+
+ fw_status = ioread8(&pdsc->info_regs->fw_status);
/* is PCI broken? */
if (fw_status != PDS_RC_BAD_PCI)
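The pds_core changes read most easily as one protocol around adminq_refcnt, condensed here into its three roles (a sketch assembled from the hunks above, not a verbatim excerpt):

/* Owner, at setup: the adminq exists and is usable. */
refcount_set(&pdsc->adminq_refcnt, 1);

/* Every user (ISR, work thread, command post): guarded get/put. */
if (pdsc_adminq_inc_if_up(pdsc)) {
	/* ... process completions or post a command ... */
	refcount_dec(&pdsc->adminq_refcnt);
}

/* Teardown, after setting PDSC_S_FW_DEAD so no new get can succeed:
 * spin until the count moves 1 -> 0, i.e. no user remains. */
while (!refcount_dec_if_one(&pdsc->adminq_refcnt))
	cpu_relax();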
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index e35d3e7006bf..110c4b826b22 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -184,6 +184,7 @@ struct pdsc {
struct mutex devcmd_lock; /* lock for dev_cmd operations */
struct mutex config_lock; /* lock for configuration operations */
spinlock_t adminq_lock; /* lock for adminq operations */
+ refcount_t adminq_refcnt;
struct pds_core_dev_info_regs __iomem *info_regs;
struct pds_core_dev_cmd_regs __iomem *cmd_regs;
struct pds_core_intr __iomem *intr_ctrl;
@@ -280,7 +281,6 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
union pds_core_dev_comp *comp, int max_seconds);
int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
-int pdsc_dev_reinit(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
void pdsc_reset_prepare(struct pci_dev *pdev);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 8ec392299b7d..4e8579ca1c8c 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity);
void pdsc_debugfs_add_ident(struct pdsc *pdsc)
{
+ /* This file will already exist in the reset flow */
+ if (debugfs_lookup("identity", pdsc->dentry))
+ return;
+
debugfs_create_file("identity", 0400, pdsc->dentry,
pdsc, &identity_fops);
}
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index 31940b857e0e..e65a1632df50 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -57,6 +57,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
bool pdsc_is_fw_running(struct pdsc *pdsc)
{
+ if (!pdsc->info_regs)
+ return false;
+
pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
pdsc->last_fw_time = jiffies;
pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
@@ -182,13 +185,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
{
int err;
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
pdsc_devcmd_dbell(pdsc);
err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
- memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
queue_work(pdsc->wq, &pdsc->health_work);
+ else
+ memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
return err;
}
@@ -309,13 +316,6 @@ static int pdsc_identify(struct pdsc *pdsc)
return 0;
}
-int pdsc_dev_reinit(struct pdsc *pdsc)
-{
- pdsc_init_devinfo(pdsc);
-
- return pdsc_identify(pdsc);
-}
-
int pdsc_dev_init(struct pdsc *pdsc)
{
unsigned int nintrs;
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index e9948ea5bbcd..54864f27c87a 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -111,7 +111,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
mutex_lock(&pdsc->devcmd_lock);
err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
- memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ if (!err)
+ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
if (err && err != -EIO)
return err;
diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c
index 90a811f3878a..fa626719e68d 100644
--- a/drivers/net/ethernet/amd/pds_core/fw.c
+++ b/drivers/net/ethernet/amd/pds_core/fw.c
@@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
dev_info(pdsc->dev, "Installing firmware\n");
+ if (!pdsc->cmd_regs)
+ return -ENXIO;
+
dl = priv_to_devlink(pdsc);
devlink_flash_update_status_notify(dl, "Preparing to flash",
NULL, 0, 0);
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 3080898d7b95..0050c5894563 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -37,6 +37,11 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
struct pdsc_dev_bar *bars = pdsc->bars;
unsigned int i;
+ pdsc->info_regs = NULL;
+ pdsc->cmd_regs = NULL;
+ pdsc->intr_status = NULL;
+ pdsc->intr_ctrl = NULL;
+
for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
if (bars[i].vaddr)
pci_iounmap(pdsc->pdev, bars[i].vaddr);
@@ -293,7 +298,7 @@ err_out_stop:
err_out_teardown:
pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
err_out_unmap_bars:
- del_timer_sync(&pdsc->wdtimer);
+ timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
destroy_workqueue(pdsc->wq);
mutex_destroy(&pdsc->config_lock);
@@ -420,7 +425,7 @@ static void pdsc_remove(struct pci_dev *pdev)
*/
pdsc_sriov_configure(pdev, 0);
- del_timer_sync(&pdsc->wdtimer);
+ timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->wq)
destroy_workqueue(pdsc->wq);
@@ -433,7 +438,6 @@ static void pdsc_remove(struct pci_dev *pdev)
mutex_destroy(&pdsc->config_lock);
mutex_destroy(&pdsc->devcmd_lock);
- pci_free_irq_vectors(pdev);
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
}
@@ -445,13 +449,32 @@ static void pdsc_remove(struct pci_dev *pdev)
devlink_free(dl);
}
+static void pdsc_stop_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_shutdown_sync(&pdsc->wdtimer);
+ if (pdsc->health_work.func)
+ cancel_work_sync(&pdsc->health_work);
+}
+
+static void pdsc_restart_health_thread(struct pdsc *pdsc)
+{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+ mod_timer(&pdsc->wdtimer, jiffies + 1);
+}
+
void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
+ pdsc_stop_health_thread(pdsc);
pdsc_fw_down(pdsc);
- pci_free_irq_vectors(pdev);
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -486,6 +509,7 @@ void pdsc_reset_done(struct pci_dev *pdev)
}
pdsc_fw_up(pdsc);
+ pdsc_restart_health_thread(pdsc);
}
static const struct pci_error_handlers pdsc_err_handler = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index abd4832e4ed2..5acb3e16b567 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
return 0;
err_exit_hwts_rx:
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
@@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
aq_ring_free(&aq_ptp->ptp_tx);
aq_ring_free(&aq_ptp->ptp_rx);
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index cda8597b4e14..f7433abd6591 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self)
}
}
+void aq_ring_hwts_rx_free(struct aq_ring_s *self)
+{
+ if (!self)
+ return;
+
+ if (self->dx_ring) {
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size + AQ_CFG_RXDS_DEF,
+ self->dx_ring, self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
+}
+
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
unsigned int count;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 52847310740a..d627ace850ff 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self);
int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic, unsigned int idx,
unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_free(struct aq_ring_s *self);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
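The aquantia fix exists because dma_free_coherent() must be given exactly the size used at allocation time; the hwts ring is allocated with AQ_CFG_RXDS_DEF extra bytes, so the generic aq_ring_free() (which frees size * dx_size) would pass the wrong length. The invariant as an alloc/free pair, simplified from aq_ring.c (allocation side is an assumed shape):

/* allocation side, in aq_ring_hwts_rx_alloc(): */
self->dx_ring = dma_alloc_coherent(dev,
		self->size * self->dx_size + AQ_CFG_RXDS_DEF,
		&self->dx_ring_pa, GFP_KERNEL);

/* free side must repeat the same size expression, hence the helper: */
dma_free_coherent(dev,
		self->size * self->dx_size + AQ_CFG_RXDS_DEF,
		self->dx_ring, self->dx_ring_pa);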
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 29b04a274d07..80245c65cc90 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -535,9 +535,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
- if (j == *rule_cnt)
- return -EMSGSIZE;
-
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -547,6 +544,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
priv->net_filters[i - 1].wake_filter)
continue;
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
rule_locs[j++] = priv->net_filters[i].fs.location;
}
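
The bcmasp reordering is a general pattern: in a copy-out loop, test the output capacity only after all skip conditions, so entries that are filtered out never consume space or trigger a spurious -EMSGSIZE. A standalone userspace sketch of the pattern, with hypothetical data rather than the driver's structures:

	#include <stdio.h>

	#define EMSGSIZE 90

	/* Copy the indices of claimed slots into out[], at most cap entries.
	 * The capacity test sits below the skip test, so skipped slots
	 * never count against the caller's buffer.
	 */
	static int collect(const int *claimed, int n, int *out, int cap)
	{
		int j = 0, i;

		for (i = 0; i < n; i++) {
			if (!claimed[i])
				continue;	/* skipped: consumes no cap */
			if (j == cap)
				return -EMSGSIZE;
			out[j++] = i;
		}
		return j;
	}

	int main(void)
	{
		int claimed[] = { 0, 1, 0, 1, 0, 0 };
		int out[2];
		int n = collect(claimed, 6, out, 2);

		printf("copied %d rules: %d %d\n", n, out[0], out[1]);
		return 0;
	}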
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 53e542881255..6ad1366270f7 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -684,6 +684,8 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
+ if (!buffer_pg)
+ return -ENOMEM;
dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
DMA_FROM_DEVICE);
@@ -1048,6 +1050,9 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netdev_err(dev, "could not attach to PHY\n");
goto err_phy_disable;
}
+
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
} else if (!intf->wolopts) {
ret = phy_resume(dev->phydev);
if (ret)
@@ -1092,6 +1097,7 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
err_reclaim_tx:
+ netif_napi_del(&intf->tx_napi);
bcmasp_reclaim_free_all_tx(intf);
err_phy_disconnect:
if (phydev)
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 3e7c8671cd11..72df1bb10172 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 9b83d5361699..50b8e97a811d 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6e4f36aaf5db..36f9bad28e6a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -362,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 0b21fd5bd457..77425c7a32db 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 448a1b90de5e..6ffdc4229407 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0aacd3c6ed5c..39845d556baf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3817,7 +3817,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, j, rc, ulp_base_vec, ulp_msix;
- int tcs = netdev_get_num_tc(bp->dev);
+ int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@@ -5935,8 +5935,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ if (!rx_rings)
+ return 0;
+ return bnxt_calc_nr_ring_pages(rx_rings - 1,
+ BNXT_RSS_TABLE_ENTRIES_P5);
+ }
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
@@ -6926,7 +6930,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
if (cp < (rx + tx)) {
rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
if (rc)
- return rc;
+ goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@@ -6938,8 +6942,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
+get_rings_exit:
hwrm_req_drop(bp, req);
- return 0;
+ return rc;
}
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -7000,10 +7005,11 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs =
- cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -7050,8 +7056,10 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+ req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -9938,7 +9946,7 @@ static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
{
- int tcs = netdev_get_num_tc(bp->dev);
+ int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@@ -9947,7 +9955,7 @@ int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
{
- int tcs = netdev_get_num_tc(bp->dev);
+ int tcs = bp->num_tc;
return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
bp->tx_nr_rings_xdp;
@@ -9977,7 +9985,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
struct net_device *dev = bp->dev;
int tcs, i;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
if (tcs) {
int i, off, count;
@@ -10009,8 +10017,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
- if (netdev_get_num_tc(bp->dev))
+ if (bp->num_tc) {
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
+ }
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
@@ -10236,8 +10246,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
- int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
+ int tcs = bp->num_tc;
int rc;
if (!bnxt_need_reserve_rings(bp))
@@ -10263,6 +10273,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
+ bp->num_tc = 0;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
@@ -11564,10 +11575,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
+ bnxt_init_napi(bp);
set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
rc = bnxt_init_nic(bp, true);
if (rc) {
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ bnxt_del_napi(bp);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -11586,6 +11599,7 @@ half_open_err:
void bnxt_half_close_nic(struct bnxt *bp)
{
bnxt_hwrm_resource_free(bp, false, true);
+ bnxt_del_napi(bp);
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@@ -13232,6 +13246,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
+ /* FW may be unresponsive after FLR. FLR must complete within 100 msec
+ * so wait before continuing with recovery.
+ */
+ if (rc)
+ msleep(100);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
rc = bnxt_try_recover_fw(bp);
@@ -13784,7 +13803,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
- if (netdev_get_num_tc(dev) == tc)
+ if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -13802,9 +13821,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
+ bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
+ bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
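
The bnxt changes replace netdev_get_num_tc() lookups with a cached bp->num_tc; the invariant is that every netdev_set_num_tc()/netdev_reset_tc() call updates the cache in the same place, so the driver never reads a TC count that disagrees with its own ring bookkeeping mid-reconfiguration. A minimal sketch of that invariant, assuming struct bnxt from above; the helper name is illustrative:

	static void example_set_tc(struct bnxt *bp, u8 tc)
	{
		if (tc) {
			netdev_set_num_tc(bp->dev, tc);
			bp->num_tc = tc;	/* cache moves with the netdev */
		} else {
			netdev_reset_tc(bp->dev);
			bp->num_tc = 0;		/* never let the cache go stale */
		}
	}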
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index b8ef1717cb65..47338b48ca20 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2225,6 +2225,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
+ u8 num_tc;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 63e067038385..0dbb880a7aa0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
- int tc = netdev_get_num_tc(bp->dev);
+ int tc = bp->num_tc;
if (!tc)
tc = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 27b983c0a8a9..dc4ca706b0e2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -884,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@@ -944,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
- tcs = netdev_get_num_tc(dev);
+ tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -1574,7 +1574,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+ return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index adad188e38b8..cc07660330f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -684,7 +684,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
timestamp.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(ptp->tx_skb, &timestamp);
} else {
- netdev_WARN_ONCE(bp->dev,
+ netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c2b25fc623ec..4079538bc310 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
- tc = netdev_get_num_tc(dev);
+ tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 31191b520b58..c32174484a96 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
*/
static void
-bnad_tx_cleanup(struct delayed_work *work)
+bnad_tx_cleanup(struct work_struct *work)
{
struct bnad_tx_info *tx_info =
- container_of(work, struct bnad_tx_info, tx_cleanup_work);
+ container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
struct bnad *bnad = NULL;
struct bna_tcb *tcb;
unsigned long flags;
@@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
* Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
*/
static void
-bnad_rx_cleanup(void *work)
+bnad_rx_cleanup(struct work_struct *work)
{
struct bnad_rx_info *rx_info =
container_of(work, struct bnad_rx_info, rx_cleanup_work);
@@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
}
tx_info->tx = tx;
- INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
- (work_func_t)bnad_tx_cleanup);
+ INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
/* Register ISR for the Tx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
rx_info->rx = rx;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- INIT_WORK(&rx_info->rx_cleanup_work,
- (work_func_t)(bnad_rx_cleanup));
+ INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
/*
* Init NAPI, so that state is set to NAPI_STATE_SCHED,
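
The bnad fix drops the work_func_t casts: a work handler must take struct work_struct * and recover its container with container_of(), and for delayed work the embedded member is the inner .work field. A standalone illustration of the offsetof arithmetic, with the kernel types mocked for userspace:

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Mocked kernel types: delayed_work embeds a work_struct named work. */
	struct work_struct { int pending; };
	struct delayed_work { struct work_struct work; int timer; };

	struct tx_info {
		int id;
		struct delayed_work tx_cleanup_work;
	};

	/* Correct prototype: the callback receives the inner work_struct
	 * and steps out through ".work" to reach the delayed_work owner.
	 */
	static void tx_cleanup(struct work_struct *work)
	{
		struct tx_info *tx =
			container_of(work, struct tx_info, tx_cleanup_work.work);

		printf("cleaning tx_info %d\n", tx->id);
	}

	int main(void)
	{
		struct tx_info tx = { .id = 7 };

		tx_cleanup(&tx.tx_cleanup_work.work);
		return 0;
	}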
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 9cc6303c82ff..f38d31bfab1b 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -27,6 +27,7 @@
#include "octeon_network.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");
/* OOM task polling interval */
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 1c2a540db13d..1f495cfd7959 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
module_platform_driver(ep93xx_eth_driver);
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 20fcb20b42ed..66b577835338 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
tlv->type = htons(type);
tlv->length = htons(length);
- memcpy(tlv->value, value, length);
+ unsafe_memcpy(tlv->value, value, length,
+ /* Flexible array of flexible arrays */);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index df40c720e7b2..64eadd320798 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -719,17 +719,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
struct xdp_buff *xdp,
- struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+ bool zc)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
bool xmit;
+ u32 type;
if (unlikely(!xdpf))
return false;
+ /* no page pool for zero copy */
+ if (zc)
+ type = TSNEP_TX_TYPE_XDP_NDO;
+ else
+ type = TSNEP_TX_TYPE_XDP_TX;
+
__netif_tx_lock(tx_nq, smp_processor_id());
- xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
/* Avoid transmit queue timeout since we share it with the slow path */
if (xmit)
@@ -1273,7 +1281,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1323,7 +1331,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1485,7 +1493,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
xdp_prepare_buff(&xdp, page_address(entry->page),
XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
- length, false);
+ length - ETH_FCS_LEN, false);
consume = tsnep_xdp_run_prog(rx, prog, &xdp,
&xdp_status, tx_nq, tx);
@@ -1568,7 +1576,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
prefetch(entry->xdp->data);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
- xsk_buff_set_size(entry->xdp, length);
+ xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
/* RX metadata with timestamps is in front of actual data,
@@ -1762,6 +1770,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
allocated--;
}
}
+
+ /* Set the need-wakeup flag immediately if the ring is not completely
+ * filled; waiting for the first poll would be too late, as the
+ * need-wakeup signal could then be delayed indefinitely.
+ */
+ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+ int desc_available = tsnep_rx_desc_available(rx);
+
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ }
}
static bool tsnep_pending(struct tsnep_queue *queue)
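
The need-wakeup hunk encodes a general AF_XDP rule: update the flag immediately after a refill attempt rather than on the next poll. A hedged sketch of the rule, reusing tsnep_rx_desc_available() and the pool field from the driver above; the wrapper itself is illustrative:

	static void example_update_need_wakeup(struct tsnep_rx *rx)
	{
		if (!xsk_uses_need_wakeup(rx->xsk_pool))
			return;

		if (tsnep_rx_desc_available(rx))
			/* ring not full: userspace must kick us to refill */
			xsk_set_rx_need_wakeup(rx->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx->xsk_pool);
	}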
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 07c2b701b5fa..9ebe751c1df0 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -661,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
module_platform_driver(nps_enet_driver);
MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index cffbf27c4656..bfdbdab443ae 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d42594f32275..432523b2c789 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
/* if any of the above changed restart the FEC */
if (status_change) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
@@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
}
} else {
if (fep->link) {
+ netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_stop(ndev);
@@ -4769,4 +4771,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9ba15d3183d7..758535adc9ff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev,
unsigned long capabilities;
unsigned long *supported;
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
mac_dev->phylink_ops = &memac_mac_ops;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
@@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev,
* (and therefore that xfi_pcs cannot be set). If we are defaulting to
* XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
*/
- if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
memac->xfi_pcs = pcs;
else
memac->sgmii_pcs = pcs;
@@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
- /* The internal connection to the serdes is XGMII, but this isn't
- * really correct for the phy mode (which is the external connection).
- * However, this is how all older device trees say that they want
- * 10GBASE-R (aka XFI), so just convert it for them.
- */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
-
/* TODO: The following interface modes are supported by (some) hardware
* but not by this driver:
* - 1000BASE-KX
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 70dd982a5edc..026f7270a54d 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
module_platform_driver(fsl_pq_mdio_driver);
+MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 7a8dc5386fff..76615d47e055 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -356,7 +356,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
- u16 packet_buffer_size, u16 len,
+ unsigned int truesize, u16 len,
struct gve_rx_ctx *ctx)
{
u32 offset = page_info->page_offset + page_info->pad;
@@ -389,10 +389,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
if (skb != ctx->skb_head) {
ctx->skb_head->len += len;
ctx->skb_head->data_len += len;
- ctx->skb_head->truesize += packet_buffer_size;
+ ctx->skb_head->truesize += truesize;
}
skb_add_rx_frag(skb, num_frags, page_info->page,
- offset, len, packet_buffer_size);
+ offset, len, truesize);
return ctx->skb_head;
}
@@ -486,7 +486,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
memcpy(alloc_page_info.page_address, src, page_info->pad + len);
skb = gve_rx_add_frags(napi, &alloc_page_info,
- rx->packet_buffer_size,
+ PAGE_SIZE,
len, ctx);
u64_stats_update_begin(&rx->statss);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index a187582d2299..ba9c19e6994c 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
* INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
* bits to count nanoseconds leaving the rest for fractional nanoseconds.
+ *
+ * Any given INCVALUE also has an associated maximum adjustment value. This
+ * maximum adjustment value is the largest increase (or decrease) which can be
+ * safely applied without overflowing the INCVALUE. Since INCVALUE has
+ * a maximum range of 24 bits, its largest value is 0xFFFFFF.
+ *
+ * To understand where the maximum value comes from, consider the following
+ * equation:
+ *
+ * new_incval = base_incval + (base_incval * adjustment) / 1billion
+ *
+ * To avoid overflow that means:
+ * max_incval = base_incval + (base_incval * max_adj) / 1billion
+ *
+ * Re-arranging:
+ * max_adj = floor(((max_incval - base_incval) * 1billion) / base_incval)
*/
#define INCVALUE_96MHZ 125
#define INCVALUE_SHIFT_96MHZ 17
#define INCPERIOD_SHIFT_96MHZ 2
#define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ)
+#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */
#define INCVALUE_25MHZ 40
#define INCVALUE_SHIFT_25MHZ 18
#define INCPERIOD_25MHZ 1
+#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */
#define INCVALUE_24MHZ 125
#define INCVALUE_SHIFT_24MHZ 14
#define INCPERIOD_24MHZ 3
+#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */
#define INCVALUE_38400KHZ 26
#define INCVALUE_SHIFT_38400KHZ 19
#define INCPERIOD_38400KHZ 1
+#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
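
The corrected derivation can be checked numerically. Below is a standalone program reproducing the MAX_PPB_* constants from the INCVALUE/SHIFT pairs above; the clamp to 10^9 - 1 and the round-down to a multiple of 100 ppb are assumptions inferred from the chosen constants, not something the header states:

	#include <stdio.h>

	#define INCVALUE_MAX 0xFFFFFF		/* TIMINCA incvalue is 24 bits */
	#define NSEC_PER_SEC 1000000000LL

	/* max_adj = floor(((max_incval - base_incval) * 1e9) / base_incval),
	 * clamped to what the PTP core accepts and rounded down to 100 ppb.
	 */
	static long long max_ppb(long long incvalue, int shift)
	{
		long long base = incvalue << shift;
		long long adj = (INCVALUE_MAX - base) * NSEC_PER_SEC / base;

		if (adj > NSEC_PER_SEC - 1)
			adj = NSEC_PER_SEC - 1;	/* PTP core limit */
		else
			adj -= adj % 100;	/* round down to 100 ppb */
		return adj;
	}

	int main(void)
	{
		printf("96MHz:    %lld\n", max_ppb(125, 17));	/* 23999900 */
		printf("25MHz:    %lld\n", max_ppb(40, 18));	/* 599999900 */
		printf("24MHz:    %lld\n", max_ppb(125, 14));	/* 999999999 */
		printf("38400KHz: %lld\n", max_ppb(26, 19));	/* 230769100 */
		return 0;
	}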
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 02d871bc112a..bbcfd529399b 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch2lan:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ break;
case e1000_pch_lpt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
+ break;
case e1000_pch_spt:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ break;
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
@@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_lnp:
case e1000_pch_ptp:
case e1000_pch_nvp:
- if ((hw->mac.type < e1000_pch_lpt) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
- adapter->ptp_clock_info.max_adj = 24000000 - 1;
- break;
- }
- fallthrough;
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
+ adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
+ else
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
- adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9d88ed6105fd..8db1eb0c1768 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1523,7 +1523,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
- I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK);
reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK,
bw_share[i]);
reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 6b60dc9b7736..d76497566e40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -43,7 +43,7 @@
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
-#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT)
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
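
The two i40e fixes above are the same family of masking bug: clearing a register with a field's shift constant instead of its mask, and building a wide mask from a signed literal. A standalone demonstration; the constants are simplified stand-ins for the I40E_PRTDCB_RETSTCC field, not the real register layout:

	#include <stdio.h>
	#include <stdint.h>

	#define ETSTC_SHIFT	30
	#define ETSTC_MASK	(0x1U << ETSTC_SHIFT)

	int main(void)
	{
		uint32_t reg = 0xFFFFFFFF;

		/* Wrong: ~SHIFT clears bits 0..4 (30 is 0b11110) and
		 * leaves the intended field bit untouched.
		 */
		uint32_t bad = reg & ~(uint32_t)ETSTC_SHIFT;

		/* Right: ~MASK clears exactly the field's bits. */
		uint32_t good = reg & ~ETSTC_MASK;

		/* Same reasoning drives 0xFFFFFFU << 8: shifting the signed
		 * literal 0xFFFFFF into bit 31 is undefined behaviour.
		 */
		uint32_t oui = 0xFFFFFFU << 8;

		printf("bad:  %08X (bit 30 still set)\n", bad);
		printf("good: %08X (bit 30 cleared)\n", good);
		printf("oui:  %08X\n", oui);
		return 0;
	}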
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ae8f9f135725..54eb55464e31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
int err = 0;
bool ok;
- int ret;
bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
- if (ring->vsi->type == I40E_VSI_MAIN)
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ ring->rx_buf_len = vsi->rx_buf_len;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ring->rx_buf_len =
- xsk_pool_get_rx_frame_size(ring->xsk_pool);
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
- if (ret)
- return ret;
+ if (err)
+ return err;
dev_info(&vsi->back->pdev->dev,
"Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
- ring->rx_buf_len = vsi->rx_buf_len;
- if (ring->vsi->type == I40E_VSI_MAIN) {
- ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (ret)
- return ret;
- }
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
}
+skip:
xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
@@ -4911,27 +4926,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int pf_q, err, q_end;
+ u32 pf_q, tx_q_end, rx_q_end;
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- q_end = vsi->base_queue + vsi->num_queue_pairs;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
- i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+ tx_q_end = vsi->base_queue +
+ vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false);
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
- err = i40e_control_wait_rx_q(pf, pf_q, false);
- if (err)
- dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d disable timeout\n",
- vsi->seid, pf_q);
- }
+ rx_q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++)
+ i40e_control_rx_q(pf, pf_q, false);
msleep(I40E_DISABLE_TX_GAP_MSEC);
- pf_q = vsi->base_queue;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
i40e_vsi_wait_queues_disabled(vsi);
@@ -5345,7 +5356,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
int v, ret = 0;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v]) {
ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret)
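
The i40e_configure_rx_ring rework is easier to read as the resulting flow: register the rxq info once per ring, then re-register with the XSK frame size when a pool attaches, and only then pick the memory model. A hedged condensation of that flow, with error paths trimmed for brevity:

	static int example_cfg_rxq(struct i40e_ring *ring)
	{
		int err;

		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->queue_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
		}

		if (ring->xsk_pool) {
			/* frame size changes with the pool: re-register */
			xdp_rxq_info_unreg(&ring->xdp_rxq);
			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
						 ring->queue_index,
						 ring->q_vector->napi.napi_id,
						 ring->rx_buf_len);
			if (err)
				return err;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
		} else {
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
		}
		return err;
	}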
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 971ba3322038..0d7177083708 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1548,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int err;
u64_stats_init(&rx_ring->syncp);
@@ -1569,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
- /* XDP RX-queue info only needed for RX rings exposed to XDP */
- if (rx_ring->vsi->type == I40E_VSI_MAIN) {
- err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
- if (err < 0)
- return err;
- }
-
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
rx_ring->rx_bi =
@@ -2087,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
struct xdp_buff *xdp)
{
- u32 next = rx_ring->next_to_clean;
+ u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 next = rx_ring->next_to_clean, i = 0;
struct i40e_rx_buffer *rx_buffer;
xdp->flags = 0;
@@ -2100,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
if (!rx_buffer->page)
continue;
- if (xdp_res == I40E_XDP_CONSUMED)
- rx_buffer->pagecnt_bias++;
- else
+ if (xdp_res != I40E_XDP_CONSUMED)
i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+ else if (i++ <= nr_frags)
+ rx_buffer->pagecnt_bias++;
/* EOP buffer will be put in i40e_clean_rx_irq() */
if (next == rx_ring->next_to_process)
@@ -2117,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
struct i40e_rx_buffer *rx_buffer;
+ struct skb_shared_info *sinfo;
unsigned int headlen;
struct sk_buff *skb;
+ u32 nr_frags = 0;
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
@@ -2168,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
@@ -2187,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
- sinfo = xdp_get_shared_info_from_buff(xdp);
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
@@ -2212,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct xdp_buff *xdp,
- u32 nr_frags)
+ struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo;
struct sk_buff *skb;
+ u32 nr_frags;
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
@@ -2231,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
*/
net_prefetch(xdp->data_meta);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
@@ -2243,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
xdp_update_skb_shared_info(skb, nr_frags,
sinfo->xdp_frags_size,
nr_frags * xdp->frame_sz,
@@ -2589,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
total_rx_bytes += size;
} else {
if (ring_uses_build_skb(rx_ring))
- skb = i40e_build_skb(rx_ring, xdp, nfrags);
+ skb = i40e_build_skb(rx_ring, xdp);
else
- skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+ skb = i40e_construct_skb(rx_ring, xdp);
/* drop if we failed to retrieve a buffer */
if (!skb) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 908cdbd3ec5d..b34c71770887 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2848,6 +2848,24 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/**
+ * i40e_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !vf->trusted)
+ return false;
+
+ return true;
+}
+
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2907,8 +2925,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
+ if (!i40e_can_vf_change_mac(vf) &&
+ !is_multicast_ether_addr(addr) &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
@@ -3114,19 +3132,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = -EINVAL;
goto error_param;
}
- if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
- was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
- for (i = 0; i < al->num_elements; i++)
+ for (i = 0; i < al->num_elements; i++) {
+ const u8 *addr = al->list[i].addr;
+
+ /* Allow to delete VF primary MAC only if it was not set
+ * administratively by PF or if VF is trusted.
+ */
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
+ i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ }
spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index af7d5fa6cdc1..11500003af0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@@ -498,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
&rx_bytes, xdp_res, &failure);
- first->flags = 0;
next_to_clean = next_to_process;
if (failure)
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 533b923cae2d..c979192e44d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector = vsi->q_vectors[v_idx];
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, NULL);
+ ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
tx_ring->q_vector = NULL;
}
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, NULL);
+ ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
rx_ring->q_vector = NULL;
}
@@ -547,19 +545,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
if (ring->vsi->type == ICE_VSI_PF) {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
@@ -571,13 +577,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- __xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index,
- ring->q_vector->napi.napi_id,
- ring->vsi->rx_buf_len);
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->rx_buf_len);
+ if (err)
+ return err;
+ }
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index b9c5eced6326..adfa1f2a80a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -31,6 +31,26 @@ static const char * const pin_type_name[] = {
};
/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+ if (ice_is_reset_in_progress(pf->state)) {
+ NL_SET_ERR_MSG(extack, "PF reset in progress");
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_dpll_pin_freq_set - set pin's frequency
* @pf: private board structure
* @pin: pointer to a pin
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
+ * @dpll_idx: dpll index to connect to output pin
* @pin_type: type of pin being enabled
* @extack: error reporting
*
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
*/
static int
ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
- enum ice_dpll_pin_type pin_type,
+ u8 dpll_idx, enum ice_dpll_pin_type pin_type,
struct netlink_ext_ack *extack)
{
u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
- ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+ 0, 0);
break;
default:
return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
case ICE_DPLL_PIN_TYPE_INPUT:
ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
NULL, &pin->flags[0],
- &pin->freq, NULL);
+ &pin->freq, &pin->phase_adjust);
if (ret)
goto err;
if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
- &pin->flags[0], NULL,
+ &pin->flags[0], &parent,
&pin->freq, NULL);
if (ret)
goto err;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
- pin->state[0] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+ parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ parent == pf->dplls.eec.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ parent == pf->dplls.pps.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -568,9 +607,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
if (enable)
- ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+ extack);
else
ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
if (!ret)
@@ -603,6 +646,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct netlink_ext_ack *extack)
{
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+
+ if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+ return 0;
return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -665,14 +713,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
if (ret)
goto unlock;
- if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+ pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
*state = p->state[d->dpll_idx];
- else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
- *state = p->state[0];
ret = 0;
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -790,6 +840,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
mutex_unlock(&pf->dplls.lock);
@@ -910,6 +963,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
u8 flag, flags_en = 0;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
switch (type) {
case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1069,6 +1125,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1123,6 +1182,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1305,8 +1367,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
- int ret;
+ int ret = 0;
+ if (ice_is_reset_in_progress(pf->state))
+ goto resched;
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
@@ -1326,6 +1390,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+resched:
/* Run twice a second or reschedule if update failed */
kthread_queue_delayed_work(d->kworker, &d->work,
ret ? msecs_to_jiffies(10) :
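
Every ice_dpll op now opens with the same guard, and the ordering matters: check for reset and fail with -EBUSY before taking the dplls lock, since a reset path may hold the hardware. A minimal sketch of the pattern; the op body is illustrative:

	static int example_dpll_op(struct ice_pf *pf,
				   struct netlink_ext_ack *extack)
	{
		int ret;

		if (ice_dpll_is_reset(pf, extack))
			return -EBUSY;	/* extack already carries the reason */

		mutex_lock(&pf->dplls.lock);
		ret = 0;		/* ... firmware access goes here ... */
		mutex_unlock(&pf->dplls.lock);
		return ret;
	}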
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 2a25323105e5..467372d541d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
}
/**
+ * ice_pkg_has_lport_extract - check if lport extraction supported
+ * @hw: HW struct
+ */
+static bool ice_pkg_has_lport_extract(struct ice_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) {
+ u16 offset;
+ u8 fv_prot;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i,
+ &fv_prot, &offset);
+ if (fv_prot == ICE_FV_PROT_MDID &&
+ offset == ICE_LP_EXT_BUF_OFFSET)
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_lag_find_primary - returns pointer to primary interfaces lag struct
* @lag: local interfaces lag struct
*/
@@ -1206,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
}
/**
- * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG
* @pf: PF struct
*/
static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
@@ -1219,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
else
ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
- if (caps->sriov_lag)
+ if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw))
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index ede833dfa658..183b38792ef2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -17,6 +17,9 @@ enum ice_lag_role {
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_RESET_RETRIES 5
+#define ICE_SW_DEFAULT_PROFILE 0
+#define ICE_FV_PROT_MDID 255
+#define ICE_LP_EXT_BUF_OFFSET 32
struct ice_pf;
struct ice_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 9be724291ef8..097bf8fd6bf0 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2426,7 +2426,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_map_rings_to_vectors(vsi);
/* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi, true);
+ ice_vsi_set_napi_queues(vsi);
vsi->stat_offsets_loaded = false;
@@ -2904,19 +2904,19 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
- * ice_queue_set_napi - Set the napi instance for the queue
+ * __ice_queue_set_napi - Set the napi instance for the queue
* @dev: device to which NAPI and queue belong
* @queue_index: Index of queue
* @type: queue type as RX or TX
* @napi: NAPI context
* @locked: is the rtnl_lock already held
*
- * Set the napi instance for the queue
+ * Set the napi instance for the queue. Caller indicates the lock status.
*/
static void
-ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi,
- bool locked)
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
{
if (!locked)
rtnl_lock();
@@ -2926,26 +2926,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
}
/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (!vsi->netdev)
+ return;
+
+ if (current_work() == &pf->serv_task ||
+ test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+ test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_SUSPENDED, pf->state))
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ false);
+ else
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ true);
+}
+
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
* @q_vector: q_vector pointer
* @locked: is the rtnl_lock already held
*
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
* Associate the q_vector napi with all the queue[s] on the vector
*/
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
{
struct ice_rx_ring *rx_ring;
struct ice_tx_ring *tx_ring;
ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
/* Also set the interrupt number for the NAPI */
netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}
@@ -2953,11 +3006,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
/**
* ice_vsi_set_napi_queues
* @vsi: VSI pointer
- * @locked: is the rtnl_lock already held
*
* Associate queue[s] with napi for all vectors
*/
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
int i;
@@ -2965,7 +3017,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
return;
ice_for_each_q_vector(vsi, i)
- ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
}
/**
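The split above follows a common kernel locking idiom: the inner helper (prefixed with "__" in the driver) takes the rtnl lock state as an explicit parameter, while the public wrapper derives that state from the execution path (service task, reset/suspend state bits). A minimal userspace sketch of the same shape, with a pthread mutex standing in for rtnl_lock; every name below is invented for the demo:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* rtnl_lock stand-in */

/* Inner helper: caller indicates whether cfg_lock is already held. */
static void do_set_queue_cfg(int queue, int value, bool locked)
{
	if (!locked)
		pthread_mutex_lock(&cfg_lock);
	printf("queue %d -> %d\n", queue, value);
	if (!locked)
		pthread_mutex_unlock(&cfg_lock);
}

/* Public wrapper: derives the lock state from the execution path, the way
 * ice_queue_set_napi() checks current_work() and the pf->state bits. */
static void set_queue_cfg(int queue, int value, bool in_reset_path)
{
	do_set_queue_cfg(queue, value, /*locked=*/!in_reset_path);
}

int main(void)
{
	set_queue_cfg(0, 42, true);		/* reset path: helper locks */

	pthread_mutex_lock(&cfg_lock);		/* normal path: lock already held */
	set_queue_cfg(1, 7, false);
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}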
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 71bd27244941..bfcfc582a4c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -91,9 +91,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
int ice_vsi_release(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index dd4a9bc0dfdc..59c7e37f175f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3495,7 +3495,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
- ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
}
}
@@ -5447,6 +5447,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ ice_vsi_set_napi_queues(pf->vsi[v]);
}
ret = ice_req_irq_msix_misc(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index 82bc54fec7f3..a2562f04267f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -24,7 +24,7 @@
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
-#define ICE_M(m, s) ((m) << (s))
+#define ICE_M(m, s) ((m ## U) << (s))
struct ice_dma_mem {
void *va;
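The ICE_M() change is an integer-promotion fix: without a suffix, a mask literal such as 0xff is a signed int, shifting it into bit 31 is undefined, and the negative result sign-extends when widened to 64 bits. Token-pasting a U suffix keeps the whole expression unsigned. A small sketch of the difference, using local macro names and with the narrowing made explicit so the demo itself stays well-defined:

#include <stdio.h>

#define MASK_SIGNED(m, s)	((m) << (s))		/* old ICE_M shape */
#define MASK_UNSIGNED(m, s)	((m ## U) << (s))	/* new ICE_M shape */

int main(void)
{
	/* converting 0xff000000u to int (what the signed shift effectively
	 * produced) goes negative, then sign-extends when widened */
	long long as_signed = (int)MASK_UNSIGNED(0xff, 24);
	long long as_unsigned = MASK_UNSIGNED(0xff, 24);

	printf("signed:   0x%llx\n", (unsigned long long)as_signed);	/* ffffffffff000000 */
	printf("unsigned: 0x%llx\n", (unsigned long long)as_unsigned);	/* ff000000 */
	return 0;
}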
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 74d13cc5a3a7..97d41d6ebf1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
- if (rx_ring->vsi->type == ICE_VSI_PF &&
- !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
- goto err;
return 0;
err:
@@ -603,9 +598,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- rx_buf->act = ret;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
}
/**
@@ -893,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
return -ENOMEM;
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
+ /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+ * can pop off frags but the driver has to handle it on its own
+ */
+ rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@@ -1251,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1266,10 +1263,12 @@ construct_skb:
ICE_XDP_CONSUMED);
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
break;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index b3379ff73674..af955b0e5dc5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -358,6 +358,7 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
+ u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 762047508619..afcead4baef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -12,26 +12,39 @@
* act: action to store onto Rx buffers related to XDP buffer parts
*
* Set action that should be taken before putting Rx buffer from first frag
- * to one before last. Last one is handled by caller of this function as it
- * is the EOP frag that is currently being processed. This function is
- * supposed to be called only when XDP buffer contains frags.
+ * to the last.
*/
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
const unsigned int act)
{
- const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- u32 first = rx_ring->first_desc;
- u32 nr_frags = sinfo->nr_frags;
+ u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
struct ice_rx_buf *buf;
for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[first];
+ buf = &rx_ring->rx_buf[idx];
buf->act = act;
- if (++first == cnt)
- first = 0;
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ /* adjust pagecnt_bias on frags freed by XDP prog */
+ if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+ u32 delta = rx_ring->nr_frags - sinfo_frags;
+
+ while (delta) {
+ if (idx == 0)
+ idx = cnt - 1;
+ else
+ idx--;
+ buf = &rx_ring->rx_buf[idx];
+ buf->pagecnt_bias--;
+ delta--;
+ }
}
}
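ice_set_rx_bufs_act() now trusts the frag count the ring recorded before the XDP program ran (rx_ring->nr_frags) rather than the possibly shrunken sinfo->nr_frags, then walks backwards to fix pagecnt_bias for frags that bpf_xdp_adjust_tail() popped off. A standalone model of that two-phase ring walk; RING_SIZE, struct demo_buf and the act values are demo assumptions, not driver code:

#include <stdio.h>

#define RING_SIZE 8

struct demo_buf { int act; int pagecnt_bias; };

static void set_bufs_act(struct demo_buf *ring, unsigned int first,
			 unsigned int stored_frags, unsigned int sinfo_frags,
			 int act, int consumed)
{
	unsigned int nr_frags = stored_frags + 1;	/* +1 for the head buffer */
	unsigned int idx = first;

	/* phase 1: mark every buffer of the packet, wrapping at ring end */
	for (unsigned int i = 0; i < nr_frags; i++) {
		ring[idx].act = act;
		if (++idx == RING_SIZE)
			idx = 0;
	}

	/* phase 2: frags the XDP prog released still hold one extra page
	 * reference each; step back from the end and drop it */
	if (sinfo_frags < stored_frags && consumed) {
		unsigned int delta = stored_frags - sinfo_frags;

		while (delta--) {
			idx = idx ? idx - 1 : RING_SIZE - 1;
			ring[idx].pagecnt_bias--;
		}
	}
}

int main(void)
{
	struct demo_buf ring[RING_SIZE] = { { 0, 0 } };

	/* 3 frags stored before the prog ran; XDP trimmed down to 1 frag */
	set_bufs_act(ring, 2, 3, 1, /*act=*/2, /*consumed=*/1);
	printf("tail frag bias: %d %d\n",
	       ring[4].pagecnt_bias, ring[5].pagecnt_bias);	/* -1 -1 */
	return 0;
}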
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 41ab6d7bbd9e..a508e917ce5f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -1072,7 +1072,7 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_OROM_VER_BUILD_SHIFT 8
#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
#define ICE_OROM_VER_SHIFT 24
-#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5d1ae8e4058a..8b81a1677045 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -825,7 +825,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start), 0, size);
+ virt_to_page(xdp->data_hard_start),
+ XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@@ -895,7 +896,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (!first) {
first = xdp;
- xdp_buff_clear_frags_flag(first);
} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
break;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 5fea2fd957eb..58179bd733ff 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
+ netdev->dev_port = idx;
+
/* configure default MTU size */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = vport->max_mtu;
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 8dc837889723..4a3c4454d25a 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -978,7 +978,7 @@ struct virtchnl2_ptype {
u8 proto_id_count;
__le16 pad;
__le16 proto_id[];
-};
+} __packed __aligned(2);
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
/**
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index a2b759531cb7..3c2dc7bdebb5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -637,7 +637,7 @@ struct igb_adapter {
struct timespec64 period;
} perout[IGB_N_PEROUT];
- char fw_version[32];
+ char fw_version[48];
#ifdef CONFIG_IGB_HWMON
struct hwmon_buff *igb_hwmon_buff;
bool ets;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4df8d4153aa5..cebb44f51d5f 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
- char *lbuf;
igb_get_fw_version(hw, &fw);
@@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter)
case e1000_i210:
case e1000_i211:
if (!(igb_get_flash_presence_i210(hw))) {
- lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor,
- fw.invm_img_type);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
break;
}
fallthrough;
default:
/* if option rom is valid, display its version too */
if (fw.or_valid) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id, fw.or_major, fw.or_build,
- fw.or_patch);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else if (fw.etrack_id != 0X0000) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
} else {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major,
- fw.eep_minor, fw.eep_build);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
-
- /* the truncate happens here if it doesn't fit */
- strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
- kfree(lbuf);
}
/**
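The igb conversion drops the kasprintf()/strscpy() round trip, which allocates, can fail with a NULL that was never checked, and truncates afterwards anyway, and instead formats straight into the enlarged 48-byte fw_version field: snprintf() always NUL-terminates and truncates safely in place. A userspace analogue of the resulting shape (struct and function names are illustrative):

#include <stdio.h>

struct fw_info { int major, minor, build; };

/* format directly into a fixed destination; no allocation, no copy */
static void set_fw_version(char *dst, size_t len, const struct fw_info *fw)
{
	snprintf(dst, len, "%d.%d.%d", fw->major, fw->minor, fw->build);
}

int main(void)
{
	char ver[48];
	struct fw_info fw = { 1, 63, 35 };

	set_fw_version(ver, sizeof(ver), &fw);
	printf("%s\n", ver);
	return 0;
}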
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 319c544b9f04..f94570556120 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
/* adjust timestamp for the TX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_TX_LATENCY_10;
@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 7cd8716d2ffa..861f37076861 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* Temporary workaround - should be removed when PHY will implement
- * IEEE registers as properly
- */
- /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
usleep_range(1000, 2000);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 6208923e29a2..c1adc94a5a65 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -716,7 +716,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command);
hw_dbg(hw, "Failed to read, error %x\n", error);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!ret)
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index 5182fe737c37..ff54fbe41bcc 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
module_platform_driver(liteeth_driver);
MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 820b1fabe297..23adf53c2aa1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}
+/* Clean up a pool before the OS actually initializes it */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+ u32 val;
+ int i;
+
+ /* Drain the BM of all possible residues left by the firmware */
+ for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+ mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+ put_cpu();
+
+ /* Stop the BM pool */
+ val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+ val |= MVPP2_BM_STOP_MASK;
+ mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
+}
+
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
int i, err, poolnum = MVPP2_BM_POOLS_NUM;
struct mvpp2_port *port;
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+ /* Clean up the pools in case they contain stale state */
+ for (i = 0; i < poolnum; i++)
+ mvpp2_bm_pool_cleanup(priv, i);
+
if (priv->percpu_pools) {
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
}
}
- poolnum = mvpp2_get_nrxqs(priv) * 2;
for (i = 0; i < poolnum; i++) {
/* the pool in use */
int pn = i / (poolnum / 2);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 9690ac01f02c..b92264d0a77e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 167145bdcb75..516adb50f9f6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
return 0;
}
-static int npc_mcam_verify_pf_func(struct rvu *rvu,
- struct mcam_entry *entry_data, u8 intf,
- u16 pcifunc)
-{
- u16 pf_func, pf_func_mask;
-
- if (is_npc_intf_rx(intf))
- return 0;
-
- pf_func_mask = (entry_data->kw_mask[0] >> 32) &
- NPC_KEX_PF_FUNC_MASK;
- pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
-
- pf_func = be16_to_cpu((__force __be16)pf_func);
- if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
- ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
- (pcifunc & ~RVU_PFVF_FUNC_MASK)))
- return -EINVAL;
-
- return 0;
-}
-
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -437,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
return;
}
+ /* AF modifies the given action iff the PF/VF has requested it */
+ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+ return;
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -1850,8 +1832,8 @@ void npc_mcam_rsrcs_deinit(struct rvu *rvu)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- kfree(mcam->bmap);
- kfree(mcam->bmap_reverse);
+ bitmap_free(mcam->bmap);
+ bitmap_free(mcam->bmap_reverse);
kfree(mcam->entry2pfvf_map);
kfree(mcam->cntr2pfvf_map);
kfree(mcam->entry2cntr_map);
@@ -1904,21 +1886,20 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
/* Allocate bitmaps for managing MCAM entries */
- mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap)
return -ENOMEM;
- mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap_reverse)
goto free_bmap;
mcam->bmap_fcnt = mcam->bmap_entries;
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
- mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
+
if (!mcam->entry2pfvf_map)
goto free_bmap_reverse;
@@ -1941,21 +1922,21 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (err)
goto free_entry_map;
- mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr2pfvf_map)
goto free_cntr_bmap;
/* Alloc memory for MCAM entry to counter mapping and for tracking
* counter's reference count.
*/
- mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->entry2cntr_map)
goto free_cntr_map;
- mcam->cntr_refcnt = kmalloc_array(mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr_refcnt)
goto free_entry_cntr_map;
@@ -1988,9 +1969,9 @@ free_cntr_bmap:
free_entry_map:
kfree(mcam->entry2pfvf_map);
free_bmap_reverse:
- kfree(mcam->bmap_reverse);
+ bitmap_free(mcam->bmap_reverse);
free_bmap:
- kfree(mcam->bmap);
+ bitmap_free(mcam->bmap);
return -ENOMEM;
}
@@ -2852,12 +2833,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (!is_pffunc_af(pcifunc) &&
- npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
- rc = NPC_MCAM_INVALID_REQ;
- goto exit;
- }
-
/* For AF installed rules, the nix_intf should be set to target NIX */
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
@@ -3209,10 +3184,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- req->hdr.pcifunc))
- return NPC_MCAM_INVALID_REQ;
-
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;
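The MCAM allocation changes above pair two cleanups: bitmap_zalloc()/bitmap_free() size the bitmap from a bit count and zero it (replacing hand-rolled kmalloc_array(BITS_TO_LONGS(...)) plus kfree()), and kcalloc() zeroes the mapping arrays so no entry starts with stale contents. A rough userspace analogue of the bitmap pairing; the demo_ names mirror, but are not, the kernel API:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* allocation sized in longs from a bit count, zero-initialized */
static unsigned long *demo_bitmap_zalloc(unsigned int nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

static void demo_bitmap_free(unsigned long *bmap)
{
	free(bmap);
}

int main(void)
{
	unsigned long *bmap = demo_bitmap_zalloc(1000);

	if (!bmap)
		return 1;
	printf("words for 1000 bits: %zu\n", (size_t)BITS_TO_LONGS(1000));
	demo_bitmap_free(bmap);
	return 0;
}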
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 7ca6941ea0b9..02d0b707aea5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
- if (err)
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
return err;
+ }
}
sq->head = 0;
@@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
+ return err;
+ }
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2928898c7f8d..7f786de61014 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev,
pfvf->hw.tx_queues = channel->tx_count;
if (pfvf->xdp_prog)
pfvf->hw.xdp_queues = channel->rx_count;
- pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues;
if (if_up)
err = dev->netdev_ops->ndo_open(dev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index a57455aebff6..e5fe67e73865 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev)
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
+ pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
pf->hw.tc_tx_queues);
@@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
xdp_features_clear_redirect_target(dev);
}
- pf->hw.non_qos_queues += pf->hw.xdp_queues;
-
if (if_up)
otx2_open(pf->netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 4d519ea833b2..f828d32737af 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
bool *need_xdp_flush)
{
- unsigned char *hard_start, *data;
+ unsigned char *hard_start;
int qidx = cq->cq_idx;
struct xdp_buff xdp;
struct page *page;
@@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
- data = (unsigned char *)phys_to_virt(pa);
- hard_start = page_address(page);
- xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ hard_start = (unsigned char *)phys_to_virt(pa);
+ xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
cqe->sg.seg_size, false);
act = bpf_prog_run_xdp(prog, &xdp);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index a6e91573f8da..de123350bd46 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -4761,7 +4761,10 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
+ if (!err)
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+
if (err) {
dev_err(&pdev->dev, "Wrong DMA config\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a7b1f9686c09..4957412ff1f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
{
const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
+ unsigned long flags;
if (!err || !(strcmp(namep, "unknown command opcode")))
return;
@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats = xa_load(&dev->cmd.stats, opcode);
if (!stats)
return;
- spin_lock_irq(&stats->lock);
+ spin_lock_irqsave(&stats->lock, flags);
stats->failed++;
if (err < 0)
stats->last_failed_errno = -err;
@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats->last_failed_mbox_status = status;
stats->last_failed_syndrome = syndrome;
}
- spin_unlock_irq(&stats->lock);
+ spin_unlock_irqrestore(&stats->lock, flags);
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
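cmd_status_log() switches from spin_lock_irq() to spin_lock_irqsave() because it can be reached from contexts that already run with interrupts disabled: the _irq variant unconditionally re-enables interrupts on unlock, clobbering the caller's state. A toy single-threaded model of that hazard; this is pure simulation, none of it is kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled;

static void local_irq_save(bool *flags) { *flags = irqs_enabled; irqs_enabled = false; }
static void local_irq_restore(bool flags) { irqs_enabled = flags; }
static void local_irq_disable(void) { irqs_enabled = false; }
static void local_irq_enable(void) { irqs_enabled = true; }

static void log_stats_irq(void)
{
	local_irq_disable();
	/* ... update stats ... */
	local_irq_enable();		/* wrong if the caller had IRQs off */
}

static void log_stats_irqsave(void)
{
	bool flags;

	local_irq_save(&flags);
	/* ... update stats ... */
	local_irq_restore(flags);	/* caller's state preserved */
}

int main(void)
{
	irqs_enabled = false;		/* pretend: caller disabled IRQs */
	log_stats_irq();
	printf("_irq:     irqs_enabled=%d (clobbered)\n", irqs_enabled);

	irqs_enabled = false;
	log_stats_irqsave();
	printf("_irqsave: irqs_enabled=%d (preserved)\n", irqs_enabled);
	return 0;
}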
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 18fed2b34fb1..928bf24d4b12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -389,7 +389,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev)
struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
struct mlx5_core_dev *mdev = mdpll->mdev;
- cancel_delayed_work(&mdpll->work);
+ cancel_delayed_work_sync(&mdpll->work);
mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
destroy_workqueue(mdpll->wq);
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0bfe1ca8a364..55c6ace0acd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index e1283531e0b8..671adbad0a40 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
+ ft->g = NULL;
kvfree(in);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 284253b79266..5d213a9886f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
- allow_swp =
- mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
+ allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index c206cc0a8483..078f56a3cbb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
- md_buff[*md_buff_sz++] = metadata_id;
+ md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
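The one-character ptp.c fix is a precedence bug: postfix ++ binds tighter than unary *, so md_buff[*md_buff_sz++] read the count once and then advanced the pointer itself, leaving the stored size forever at its first value. A compilable illustration of both forms:

#include <stdio.h>

int main(void)
{
	int buf[4] = { 0 };
	int sz = 0;
	int *psz = &sz;

	/* buggy shape (kept commented out): buf[*psz++] = 11;
	 * dereferences psz, then increments the POINTER, not the count */

	buf[(*psz)++] = 11;	/* fixed: parentheses increment the value */
	buf[(*psz)++] = 22;

	printf("sz=%d buf={%d,%d}\n", sz, buf[0], buf[1]);	/* sz=2 {11,22} */
	return 0;
}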
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 161c5190c236..05612d9c6080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
+ attrs->dir = x->xso.dir;
+
/* esn */
if (x->props.flags & XFRM_STATE_ESN) {
attrs->replay_esn.trigger = true;
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ goto skip_replay_window;
+
switch (x->replay_esn->replay_window) {
case 32:
attrs->replay_esn.replay_window =
@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
}
}
- attrs->dir = x->xso.dir;
+skip_replay_window:
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+ if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
+ x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index bb7f86c993e5..e66f486faafe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in || !ft->g) {
- kfree(ft->g);
- kvfree(in);
+ if (!ft->g)
return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free_g;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
switch (type) {
@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
- goto out;
+ goto err_free_in;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
memset(in, 0, inlen);
@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err;
+ goto err_clean_group;
ft->num_groups++;
kvfree(in);
return 0;
-err:
+err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
kvfree(in);
-
+err_free_g:
+ kfree(ft->g);
+ ft->g = NULL;
return err;
}
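The arfs_create_groups() rework replaces one shared out: label with the usual kernel unwind ladder: allocate in order and, on failure, jump to a label that releases exactly what was already set up, NULLing the pointer the caller may inspect later. A self-contained sketch of that shape; the types and the build_group() helper are invented for the demo:

#include <stdlib.h>

struct table { void **groups; void *scratch; };

/* stand-in for a group-creation call that can fail */
static void *build_group(void) { return malloc(64); }

static int create_groups(struct table *t, size_t n)
{
	int err = -1;
	size_t i;

	t->groups = calloc(n, sizeof(*t->groups));
	if (!t->groups)
		return -1;

	t->scratch = malloc(4096);
	if (!t->scratch)
		goto err_free_groups;

	for (i = 0; i < n; i++) {
		t->groups[i] = build_group();
		if (!t->groups[i])
			goto err_clean_groups;
	}

	free(t->scratch);	/* success: scratch no longer needed */
	return 0;

err_clean_groups:
	while (i--)
		free(t->groups[i]);
	free(t->scratch);
err_free_groups:
	free(t->groups);
	t->groups = NULL;	/* never leave a dangling pointer behind */
	return err;
}

int main(void)
{
	struct table t;

	return create_groups(&t, 4) ? 1 : 0;	/* demo keeps groups on success */
}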
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 67f546683e85..6ed3a32b7e22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
{
int tc, i;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
mlx5e_destroy_tis(mdev, tisn[i][tc]);
}
@@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
int tc, i;
int err;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
@@ -140,7 +140,7 @@ err_close_tises:
return err;
}
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
@@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
- err = mlx5e_create_tises(mdev, res->tisn);
- if (err) {
- mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
- goto err_destroy_bfreg;
+ if (create_tises) {
+ err = mlx5e_create_tises(mdev, res->tisn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+ goto err_destroy_bfreg;
+ }
+ res->tisn_valid = true;
}
+
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
@@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
- mlx5e_destroy_tises(mdev, res->tisn);
+ if (res->tisn_valid)
+ mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b5f1c4ca38ba..c8e8f512803e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, true);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 30932c9c9a8f..9fb2c057bd78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, priv->max_nch));
+ mlx5e_rqt_size(mdev, hp->num_channels));
if (err)
return err;
@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue;
+
+ list_del(&peer_flow->peer_flows);
if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
- list_del(&peer_flow->peer_flows);
kfree(peer_flow);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index a7ed87e9d842..22dd30cf8033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
i++;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
ether_addr_copy(dmac_v, entry->key.addr);
@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
}
+ rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 1616a6144f7b..9b8599c200e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
+ MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
+ !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 58845121954c..d77be1b4dd9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
}
/* This should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
+ err = mlx5e_create_mdev_resources(mdev, false);
if (err)
goto destroy_ht;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 40c7be124041..58bd749b5e4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 6f9790e97fed..2ebb61ef3ea9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) {
case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break;
case DR_ACTION_TYP_FT:
dest_action = action;
@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->sampler->tx_icm_addr;
break;
case DR_ACTION_TYP_VPORT:
- attr.hit_gvmi = action->vport->caps->vhca_gvmi;
- dest_action = action;
- attr.final_icm_addr = rx_rule ?
- action->vport->caps->icm_address_rx :
- action->vport->caps->icm_address_tx;
+ if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+ /* can't go to uplink on RX rule - dropping instead */
+ attr.final_icm_addr = nic_dmn->drop_icm_addr;
+ attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+ } else {
+ attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+ dest_action = action;
+ attr.final_icm_addr = rx_rule ?
+ action->vport->caps->icm_address_rx :
+ action->vport->caps->icm_address_tx;
+ }
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 21753f327868..1005bb6935b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -440,6 +440,27 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 *out;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
+
+ *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.sd_group);
+out:
+ kvfree(out);
+ return err;
+}
+
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
index 41fa2523d91d..5f2cd9a8cf8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
- struct net_device *bond = lan966x->ports[lag]->bond;
+ struct lan966x_port *port = lan966x->ports[lag];
int num_active_ports = 0;
+ struct net_device *bond;
unsigned long bond_mask;
u8 aggr_idx[16];
- if (!bond || (visited & BIT(lag)))
+ if (!port || !port->bond || (visited & BIT(lag)))
continue;
+ bond = port->bond;
bond_mask = lan966x_lag_get_mask(lan966x, bond);
for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
struct lan966x_port *port = lan966x->ports[p];
+ if (!port)
+ continue;
+
lan_wr(ANA_PGID_PGID_SET(bond_mask),
lan966x, ANA_PGID(p));
if (port->lag_tx_active)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 92108d354051..2e83bbb9477e 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port)
lan966x_taprio_speed_set(port, config->speed);
/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
- * port speed for QSGMII ports.
+ * port speed for QSGMII or SGMII ports.
*/
- if (phy_interface_num_ports(config->portmode) == 4)
+ if (phy_interface_num_ports(config->portmode) == 4 ||
+ config->portmode == PHY_INTERFACE_MODE_SGMII)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
lan_wr(config->duplex | mode,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index d1f7fc8b1b71..3c066b62e689 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sparx5);
sparx5->pdev = pdev;
sparx5->dev = &pdev->dev;
+ spin_lock_init(&sparx5->tx_lock);
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 6f565c0c0c3d..316fed5f2735 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -280,6 +280,7 @@ struct sparx5 {
int xtr_irq;
/* Frame DMA */
int fdma_irq;
+ spinlock_t tx_lock; /* lock for frame transmission */
struct sparx5_rx rx;
struct sparx5_tx tx;
/* PTP */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 6db6ac6a3bbc..ac7e1cffbcec 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
+ spin_unlock(&sparx5->tx_lock);
if (ret == -EBUSY)
goto busy;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 2967bab72505..15180538b80a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_
mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
return;
+ /* Both struct tcphdr and struct udphdr start with
+ * __be16 source;
+ * __be16 dest;
+ * so we can use the same code for both.
+ */
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
- mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
- mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+ if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) {
+ mangle_action->mangle.val =
+ (__force u32)cpu_to_be32(mangle_action->mangle.val << 16);
+ /* The mangle action's mask is an inverse mask,
+ * so clear the dest port with 0xFFFF instead
+ * of using a rotate-left operation.
+ */
+ mangle_action->mangle.mask =
+ (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF);
+ }
+ if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) {
+ mangle_action->mangle.offset = 0;
+ mangle_action->mangle.val =
+ (__force u32)cpu_to_be32(mangle_action->mangle.val);
+ mangle_action->mangle.mask =
+ (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ }
return;
default:
@@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct nfp_fl_ct_flow_entry *ct_entry;
+ struct flow_action_entry *ct_goto;
struct nfp_fl_ct_zone_entry *zt;
+ struct flow_action_entry *act;
bool wildcarded = false;
struct flow_match_ct ct;
- struct flow_action_entry *ct_goto;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_REDIRECT_INGRESS:
+ case FLOW_ACTION_MIRRED:
+ case FLOW_ACTION_MIRRED_INGRESS:
+ if (act->dev->rtnl_link_ops &&
+ !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: out port is openvswitch internal port");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+ }
flow_rule_match_ct(rule, &ct);
if (!ct.mask->ct_zone) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index e522845c7c21..0d7d138d6e0d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
- if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
if (entry->bridge_count ||
!nfp_flower_is_supported_bridge(netdev)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 3b3210d823e8..f28e769e6fda 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2776,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 33b4c2856316..3f10c5365c80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
const u32 barcfg_msix_general =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
const u32 barcfg_msix_xpb =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
NFP_CPP_TARGET_ISLAND_XPB);
const u32 barcfg_explicit[4] = {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index c49aa358e424..6ba8d4aca0a0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic)
bars[i].len = 0;
}
}
+ ionic->num_bars = 0;
}
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
@@ -215,15 +216,17 @@ out:
static void ionic_clear_pci(struct ionic *ionic)
{
- ionic->idev.dev_info_regs = NULL;
- ionic->idev.dev_cmd_regs = NULL;
- ionic->idev.intr_status = NULL;
- ionic->idev.intr_ctrl = NULL;
-
- ionic_unmap_bars(ionic);
- pci_release_regions(ionic->pdev);
+ if (ionic->num_bars) {
+ ionic->idev.dev_info_regs = NULL;
+ ionic->idev.dev_cmd_regs = NULL;
+ ionic->idev.intr_status = NULL;
+ ionic->idev.intr_ctrl = NULL;
+
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ }
- if (atomic_read(&ionic->pdev->enable_cnt) > 0)
+ if (pci_is_enabled(ionic->pdev))
pci_disable_device(ionic->pdev);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 1e7c71f7f081..746072b4dbd0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -319,22 +319,32 @@ do_check_time:
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return (u8)PCI_ERROR_RESPONSE;
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return false;
return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
+ if (!idev->dev_cmd_regs)
+ return;
memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
idev->opcode = cmd->cmd.opcode;
+
+ if (!idev->dev_cmd_regs)
+ return;
+
memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
iowrite32(0, &idev->dev_cmd_regs->done);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
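Every ionic dev_cmd accessor now checks dev_cmd_regs before touching it, because a reset or teardown path can unmap the BARs and NULL the pointer while other threads still call in; returning all-ones mimics what a read from a dead PCI device produces. A userspace sketch of the guard pattern, with demo struct names standing in for the driver's register layout:

#include <stdint.h>
#include <stdio.h>

struct demo_regs { uint32_t done; uint8_t status; };
struct demo_dev { struct demo_regs *regs; };

static uint8_t demo_cmd_status(struct demo_dev *d)
{
	if (!d->regs)
		return 0xff;		/* all-ones, like PCI_ERROR_RESPONSE */
	return d->regs->status;
}

static int demo_cmd_done(struct demo_dev *d)
{
	if (!d->regs)
		return 0;		/* never "done" on a vanished device */
	return d->regs->done & 1;
}

int main(void)
{
	struct demo_dev d = { .regs = NULL };	/* state after unmap/reset */

	printf("status=0x%02x done=%d\n", demo_cmd_status(&d), demo_cmd_done(&d));
	return 0;
}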
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index cd3c0b01402e..0ffc9c4904ac 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev;
unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ idev = &lif->ionic->idev;
+ if (!idev->dev_info_regs)
+ return;
+
offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
}
static void ionic_get_link_ext_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index 5f40324cd243..3c209c1a2337 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
dl = priv_to_devlink(ionic);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ if (!idev->dev_cmd_regs) {
+ err = -ENXIO;
+ goto err_out;
+ }
+
buf_sz = sizeof(idev->dev_cmd_regs->data);
netdev_dbg(netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index cf2d5ad7b68c..fcb44ceeb6aa 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3559,7 +3559,10 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
}
- err = ionic_init_nic_features(lif);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ err = ionic_set_nic_features(lif, lif->netdev->features);
+ else
+ err = ionic_init_nic_features(lif);
if (err)
goto err_out_notifyq_deinit;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 165ab08ad2dd..2f479de329fe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -416,6 +416,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
+ if (!idev->dev_cmd_regs)
+ return;
+
iowrite32(0, &idev->dev_cmd_regs->doorbell);
memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 54cd96b035d6..6f4776759863 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -579,6 +579,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done = ionic_cq_service(cq, budget,
ionic_tx_service, NULL, NULL);
+ if (unlikely(!budget))
+ return budget;
+
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -607,6 +610,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
+ if (unlikely(!budget))
+ return budget;
+
lif = cq->bound_q->lif;
idev = &lif->ionic->idev;
@@ -656,6 +662,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
ionic_tx_service, NULL, NULL);
+ if (unlikely(!budget))
+ return budget;
+
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0e3731f50fc2..f7566cfa45ca 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -772,29 +772,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
struct ravb_rx_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ int rx_packets = 0;
u8 desc_status;
- int boguscnt;
u16 pkt_len;
u8 die_dt;
int entry;
int limit;
+ int i;
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
desc = &priv->gbeth_rx_ring[entry];
- while (desc->die_dt != DT_FEMPTY) {
+ for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -820,7 +816,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
case DT_FSTART:
@@ -848,7 +844,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
eth_type_trans(priv->rx_1st_skb, ndev);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
}
@@ -887,9 +883,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
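The rework replaces the fragile boguscnt arithmetic with an explicit packet counter checked directly against the NAPI quota, so the return value plainly answers whether the quota was exhausted. A condensed sketch of the resulting control flow (the example_* helpers and priv layout are hypothetical):

static bool example_rx(struct example_priv *priv, int *quota, int q)
{
        int limit = example_ring_space(priv, q); /* descs owned by CPU */
        int rx_packets = 0;
        int i;

        for (i = 0; i < limit && rx_packets < *quota; i++) {
                if (!example_desc_ready(priv, q))
                        break;
                if (example_process_desc(priv, q) == 0)
                        rx_packets++;
        }

        priv->stats[q].rx_packets += rx_packets;
        *quota -= rx_packets;
        return *quota == 0;     /* exhausted: NAPI should poll again */
}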
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 721c1f8e892f..5ba606a596e7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -59,28 +59,51 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+struct stmmac_q_tx_stats {
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_set_ic_bit;
+ u64_stats_t tx_tso_frames;
+ u64_stats_t tx_tso_nfrags;
+};
+
+struct stmmac_napi_tx_stats {
+ u64_stats_t tx_packets;
+ u64_stats_t tx_pkt_n;
+ u64_stats_t poll;
+ u64_stats_t tx_clean;
+ u64_stats_t tx_set_ic_bit;
+};
+
struct stmmac_txq_stats {
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_pkt_n;
- u64 tx_normal_irq_n;
- u64 napi_poll;
- u64 tx_clean;
- u64 tx_set_ic_bit;
- u64 tx_tso_frames;
- u64 tx_tso_nfrags;
- struct u64_stats_sync syncp;
+ /* Updates protected by tx queue lock. */
+ struct u64_stats_sync q_syncp;
+ struct stmmac_q_tx_stats q;
+
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_tx_stats napi;
} ____cacheline_aligned_in_smp;
+struct stmmac_napi_rx_stats {
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_pkt_n;
+ u64_stats_t poll;
+};
+
struct stmmac_rxq_stats {
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_pkt_n;
- u64 rx_normal_irq_n;
- u64 napi_poll;
- struct u64_stats_sync syncp;
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_rx_stats napi;
} ____cacheline_aligned_in_smp;
+/* Updates on each CPU protected by not allowing nested irqs. */
+struct stmmac_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+};
+
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -205,6 +228,7 @@ struct stmmac_extra_stats {
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ struct stmmac_pcpu_stats __percpu *pcpu_stats;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
@@ -216,6 +240,7 @@ struct stmmac_safety_stats {
unsigned long mac_errors[32];
unsigned long mtl_errors[32];
unsigned long dma_errors[32];
+ unsigned long dma_dpp_errors[32];
};
/* Number of fields in Safety Stats */
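The split assigns each counter to the one context that writes it: the xmit path (under the TX queue lock) updates q, the NAPI poller updates napi, and hard-IRQ counters move to a per-CPU block, so every writer can use the plain u64_stats helpers without the _irqsave variants. The resulting discipline for one NAPI-owned counter, sketched:

        unsigned int start;
        u64 pkt_n;

        /* writer, NAPI context only: */
        u64_stats_update_begin(&rxq_stats->napi_syncp);
        u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
        u64_stats_update_end(&rxq_stats->napi_syncp);

        /* reader, any context - retry if a writer was active: */
        do {
                start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
                pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
        } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));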
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 8f730ada71f9..6b65420e11b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY;
+ /* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
+ for (int i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
plat_dat->host_dma_width = dwmac->ops->addr_width;
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 137741b94122..b21d99faa2d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
u32 v;
@@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9470d3fd2ded..0d185e54eb7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
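Moving the IRQ counters into per-CPU storage removes the cross-CPU races on a shared u64_stats_sync: each hard-IRQ handler touches only its own CPU's copy, and readers sum over all possible CPUs. A self-contained version of the pattern (the example_* names are hypothetical):

struct example_pcpu_stats {
        struct u64_stats_sync syncp;
        u64_stats_t irq_n;
};

static void example_count_irq(struct example_pcpu_stats __percpu *stats)
{
        struct example_pcpu_stats *s = this_cpu_ptr(stats);

        u64_stats_update_begin(&s->syncp);
        u64_stats_inc(&s->irq_n);
        u64_stats_update_end(&s->syncp);
}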
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 7907d62d3437..85e18f9a22f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 207ff1799f2c..6a2c7d22df1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -303,6 +303,8 @@
#define XGMAC_RXCEIE BIT(4)
#define XGMAC_TXCEIE BIT(0)
#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
+#define XGMAC_MTL_DPP_CONTROL 0x000010e0
+#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -385,6 +387,7 @@
#define XGMAC_DCEIE BIT(1)
#define XGMAC_TCEIE BIT(0)
#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
+#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index eb48211d9b0e..1af2f89a0504 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
+#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
+#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
+ { true, "TDPES0", DPP_TX_ERR },
+ { true, "TDPES1", DPP_TX_ERR },
+ { true, "TDPES2", DPP_TX_ERR },
+ { true, "TDPES3", DPP_TX_ERR },
+ { true, "TDPES4", DPP_TX_ERR },
+ { true, "TDPES5", DPP_TX_ERR },
+ { true, "TDPES6", DPP_TX_ERR },
+ { true, "TDPES7", DPP_TX_ERR },
+ { true, "TDPES8", DPP_TX_ERR },
+ { true, "TDPES9", DPP_TX_ERR },
+ { true, "TDPES10", DPP_TX_ERR },
+ { true, "TDPES11", DPP_TX_ERR },
+ { true, "TDPES12", DPP_TX_ERR },
+ { true, "TDPES13", DPP_TX_ERR },
+ { true, "TDPES14", DPP_TX_ERR },
+ { true, "TDPES15", DPP_TX_ERR },
+ { true, "RDPES0", DPP_RX_ERR },
+ { true, "RDPES1", DPP_RX_ERR },
+ { true, "RDPES2", DPP_RX_ERR },
+ { true, "RDPES3", DPP_RX_ERR },
+ { true, "RDPES4", DPP_RX_ERR },
+ { true, "RDPES5", DPP_RX_ERR },
+ { true, "RDPES6", DPP_RX_ERR },
+ { true, "RDPES7", DPP_RX_ERR },
+ { true, "RDPES8", DPP_RX_ERR },
+ { true, "RDPES9", DPP_RX_ERR },
+ { true, "RDPES10", DPP_RX_ERR },
+ { true, "RDPES11", DPP_RX_ERR },
+ { true, "RDPES12", DPP_RX_ERR },
+ { true, "RDPES13", DPP_RX_ERR },
+ { true, "RDPES14", DPP_RX_ERR },
+ { true, "RDPES15", DPP_RX_ERR },
+};
+
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
void __iomem *ioaddr, bool correctable,
struct stmmac_safety_stats *stats)
@@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_log_error(ndev, value, correctable, "DMA",
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+
+ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
+ dwxgmac3_dma_dpp_errors,
+ STAT_OFF(dma_dpp_errors), stats);
}
static int
@@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+ /* 5. Enable Data Path Parity Protection */
+ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
+ /* already enabled by default, explicitly enable it again */
+ value &= ~XGMAC_DPP_DISABLE;
+ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
+
return 0;
}
@@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
ret |= !corr;
}
- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
+ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
+ * Parity Errors here
+ */
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
corr = dma & XGMAC_DECIS;
if (err) {
dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
@@ -930,6 +985,7 @@ static const struct dwxgmac3_error {
{ dwxgmac3_mac_errors },
{ dwxgmac3_mtl_errors },
{ dwxgmac3_dma_errors },
+ { dwxgmac3_dma_dpp_errors },
};
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
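The handler reads XGMAC_DMA_DPP_INT_STATUS and immediately writes the same value back, which acknowledges the latched parity-error bits (the usual write-1-to-clear idiom) before they are logged. For reference, the generic shape of that idiom (REG_INT_STATUS and the consumer are hypothetical):

        /* W1C status register: read, ack everything we saw, then act */
        u32 status = readl(ioaddr + REG_INT_STATUS);

        writel(status, ioaddr + REG_INT_STATUS);
        example_handle_bits(status);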
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 3cde695fec91..dd2ab6185c40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 1bd34b2a47e8..29367105df54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -224,7 +224,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
- .est_off = EST_XGMAC_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 42d27b97dd1d..ec44becf0e2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -549,44 +549,79 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
}
+static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
+static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int start;
- int q, stat;
- char *p;
+ int q;
for (q = 0; q < tx_cnt; q++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
- struct stmmac_txq_stats snapshot;
+ u64 pkt_n;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
- for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ = (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_tx_normal_irq_n(priv, q);
}
for (q = 0; q < rx_cnt; q++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
- struct stmmac_rxq_stats snapshot;
+ u64 pkt_n;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
- for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ = (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_rx_normal_irq_n(priv, q);
}
}
@@ -645,39 +680,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < rx_queues_count; i++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
- struct stmmac_rxq_stats snapshot;
+ struct stmmac_napi_rx_stats snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
-
- data[j++] += snapshot.rx_pkt_n;
- data[j++] += snapshot.rx_normal_irq_n;
- normal_irq_n += snapshot.rx_normal_irq_n;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ snapshot = rxq_stats->napi;
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
+ n_irq = stmmac_get_rx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ napi_poll += u64_stats_read(&snapshot.poll);
}
pos = j;
for (i = 0; i < tx_queues_count; i++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
- struct stmmac_txq_stats snapshot;
+ struct stmmac_napi_tx_stats napi_snapshot;
+ struct stmmac_q_tx_stats q_snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
-
- data[j++] += snapshot.tx_pkt_n;
- data[j++] += snapshot.tx_normal_irq_n;
- normal_irq_n += snapshot.tx_normal_irq_n;
- data[j++] += snapshot.tx_clean;
- data[j++] += snapshot.tx_set_ic_bit;
- data[j++] += snapshot.tx_tso_frames;
- data[j++] += snapshot.tx_tso_nfrags;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ q_snapshot = txq_stats->q;
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ napi_snapshot = txq_stats->napi;
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
+ n_irq = stmmac_get_tx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
+ data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
+ u64_stats_read(&napi_snapshot.tx_set_ic_bit);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
+ napi_poll += u64_stats_read(&napi_snapshot.poll);
}
normal_irq_n += priv->xstats.rx_early_irq;
data[j++] = normal_irq_n;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a0e46369ae15..7c6aef033a45 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2482,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct xdp_desc xdp_desc;
bool work_done = true;
u32 tx_set_ic_bit = 0;
- unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2566,9 +2565,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->napi_syncp);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2616,7 +2615,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
- unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2674,7 +2672,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
}
if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
- } else {
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
struct stmmac_xsk_tx_complete tx_compl = {
.priv = priv,
.desc = p,
@@ -2782,11 +2781,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
if (tx_q->dirty_tx != tx_q->cur_tx)
*pending_packets = true;
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_packets += tx_packets;
- txq_stats->tx_pkt_n += tx_packets;
- txq_stats->tx_clean++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+ u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+ u64_stats_inc(&txq_stats->napi.tx_clean);
+ u64_stats_update_end(&txq_stats->napi_syncp);
priv->xstats.tx_errors += tx_errors;
@@ -3932,6 +3931,9 @@ static int __stmmac_open(struct net_device *dev,
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
buf_sz = dma_conf->dma_buf_sz;
+ for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
+ dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
stmmac_reset_queues_param(priv);
@@ -4004,8 +4006,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
- if (priv->fpe_wq)
+ if (priv->fpe_wq) {
destroy_workqueue(priv->fpe_wq);
+ priv->fpe_wq = NULL;
+ }
netdev_info(priv->dev, "FPE workqueue stop");
}
@@ -4210,7 +4214,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
- unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4375,13 +4378,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
- txq_stats->tx_tso_frames++;
- txq_stats->tx_tso_nfrags += nfrags;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ u64_stats_inc(&txq_stats->q.tx_tso_frames);
+ u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4480,7 +4483,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
- unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4650,11 +4652,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4918,12 +4920,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
- unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -5073,7 +5074,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
- unsigned long flags;
struct sk_buff *skb;
u32 hash;
@@ -5103,10 +5103,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n++;
- rxq_stats->rx_bytes += len;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5188,7 +5188,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
- unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5343,9 +5342,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5383,7 +5382,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
- unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5643,11 +5641,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_packets += rx_packets;
- rxq_stats->rx_bytes += rx_bytes;
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5662,13 +5660,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_priv *priv = ch->priv_data;
struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5690,13 +5687,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_txq_stats *txq_stats;
bool pending_packets = false;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
work_done = min(work_done, budget);
@@ -5726,17 +5722,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_rxq_stats *rxq_stats;
struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
- unsigned long flags;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
tx_done = min(tx_done, budget);
@@ -6067,11 +6062,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6087,11 +6077,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6113,11 +6098,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6144,11 +6124,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -7062,10 +7037,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 tx_bytes;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- tx_packets = txq_stats->tx_packets;
- tx_bytes = txq_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
@@ -7077,10 +7055,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 rx_bytes;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- rx_packets = rxq_stats->rx_packets;
- rx_bytes = rxq_stats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+ rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -7474,9 +7452,16 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
- for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->xstats.txq_stats[i].syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+ }
+
+ priv->xstats.pcpu_stats =
+ devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+ if (!priv->xstats.pcpu_stats)
+ return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
@@ -7542,6 +7527,9 @@ int stmmac_dvr_probe(struct device *device,
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
ERR_PTR(ret));
+ /* Wait a bit for the reset to take effect */
+ udelay(10);
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index be01450c20dc..1530d13984d4 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -189,6 +189,7 @@ config TI_ICSSG_PRUETH
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ea85c6dd5484..c0a5abd8d9a8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 498c50c6d1a7..087dcb67505a 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->slave_num);
return;
}
+
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index bcccf43d368b..dbbea9146040 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts)
freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}
+static void cpts_clk_unregister(void *clk)
+{
+ clk_hw_unregister_mux(clk);
+}
+
+static void cpts_clk_del_provider(void *np)
+{
+ of_clk_del_provider(np);
+}
+
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
struct device_node *refclk_np;
@@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))clk_hw_unregister_mux,
- clk_hw);
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw);
if (ret) {
dev_err(cpts->dev, "add clkmux unreg action %d", ret);
goto mux_fail;
@@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
if (ret)
goto mux_fail;
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))of_clk_del_provider,
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider,
refclk_np);
if (ret) {
dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
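The wrappers exist because casting clk_hw_unregister_mux() or of_clk_del_provider() to void (*)(void *) is undefined behaviour and is rejected at run time by kernel CFI, which verifies that an indirect call's target has the expected prototype. Any devm action cleanup should instead be a real function with the exact signature, e.g. (example_put_clk is illustrative):

static void example_put_clk(void *data)
{
        clk_put(data);  /* adapter with the exact devm-action prototype */
}

        ret = devm_add_action_or_reset(dev, example_put_clk, clk);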
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d5b75af163d3..c1b0d35c8d05 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -384,18 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
- if (!descr->skb) {
- descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
- return -ENOMEM;
- }
descr->hw_regs.dmac_cmd_status = 0;
descr->hw_regs.result_size = 0;
descr->hw_regs.valid_size = 0;
descr->hw_regs.data_error = 0;
descr->hw_regs.payload.dev_addr = 0;
descr->hw_regs.payload.size = 0;
- descr->skb = NULL;
+
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+ if (!descr->skb) {
+ descr->hw_regs.payload.dev_addr = 0; /* tell DMAC not to touch memory */
+ return -ENOMEM;
+ }
offset = ((unsigned long)descr->skb->data) &
(GELIC_NET_RXBUF_ALIGN - 1);
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 2b6a607ac0b7..a273362c9e70 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -153,6 +153,7 @@ static const struct pci_device_id skfddi_pci_tbl[] = {
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
+MODULE_DESCRIPTION("SysKonnect FDDI PCI driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 704e949484d0..b9b5554ea862 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!(hw->hw_info.req_buf))
- return -ENOMEM;
+ if (!(hw->hw_info.req_buf)) {
+ result = -ENOMEM;
+ goto free_ep_info;
+ }
hw->hw_info.req_buf_size = mem_size;
mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!(hw->hw_info.res_buf))
- return -ENOMEM;
+ if (!(hw->hw_info.res_buf)) {
+ result = -ENOMEM;
+ goto free_req_buf;
+ }
hw->hw_info.res_buf_size = mem_size;
result = fjes_hw_alloc_shared_status_region(hw);
if (result)
- return result;
+ goto free_res_buf;
hw->hw_info.buffer_share_bit = 0;
hw->hw_info.buffer_unshare_reserve_bit = 0;
@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
result = fjes_hw_alloc_epbuf(&buf_pair->tx);
if (result)
- return result;
+ goto free_epbuf;
result = fjes_hw_alloc_epbuf(&buf_pair->rx);
if (result)
- return result;
+ goto free_epbuf;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
fjes_hw_init_command_registers(hw, &param);
return 0;
+
+free_epbuf:
+ for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+ fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+ }
+ fjes_hw_free_shared_status_region(hw);
+free_res_buf:
+ kfree(hw->hw_info.res_buf);
+ hw->hw_info.res_buf = NULL;
+free_req_buf:
+ kfree(hw->hw_info.req_buf);
+ hw->hw_info.req_buf = NULL;
+free_ep_info:
+ kfree(hw->ep_shm_info);
+ hw->ep_shm_info = NULL;
+ return result;
}
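The new labels unwind in the exact reverse order of allocation, and the freed pointers are also NULLed so that a later cleanup pass (fjes_hw_cleanup()) cannot double-free them. The discipline, reduced to its core (struct example_hw and the sizes are illustrative):

struct example_hw {
        void *req_buf;
        void *res_buf;
};

static int example_setup(struct example_hw *hw, size_t req_sz, size_t res_sz)
{
        int result;

        hw->req_buf = kzalloc(req_sz, GFP_KERNEL);
        if (!hw->req_buf)
                return -ENOMEM;

        hw->res_buf = kzalloc(res_sz, GFP_KERNEL);
        if (!hw->res_buf) {
                result = -ENOMEM;
                goto free_req_buf;
        }

        return 0;

free_req_buf:
        kfree(hw->req_buf);
        hw->req_buf = NULL;     /* keep a later cleanup pass idempotent */
        return result;
}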
static void fjes_hw_cleanup(struct fjes_hw *hw)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index b1919278e931..2b5357d94ff5 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1903,26 +1903,26 @@ static int __init gtp_init(void)
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
- err = rtnl_link_register(&gtp_link_ops);
+ err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto error_out;
- err = genl_register_family(&gtp_genl_family);
+ err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
- goto unreg_rtnl_link;
+ goto unreg_pernet_subsys;
- err = register_pernet_subsys(&gtp_net_ops);
+ err = genl_register_family(&gtp_genl_family);
if (err < 0)
- goto unreg_genl_family;
+ goto unreg_rtnl_link;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
-unreg_genl_family:
- genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+ unregister_pernet_subsys(&gtp_net_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 1dafa44155d0..a6fcbda64ecc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -708,7 +708,10 @@ void netvsc_device_remove(struct hv_device *device)
/* Disable NAPI and disassociate its context from the device. */
for (i = 0; i < net_device->num_chn; i++) {
/* See also vmbus_reset_channel_cb(). */
- napi_disable(&net_device->chan_table[i].napi);
+ /* only disable NAPI channels that were actually enabled */
+ if (i < ndev->real_num_rx_queues)
+ napi_disable(&net_device->chan_table[i].napi);
+
netif_napi_del(&net_device->chan_table[i].napi);
}
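The guard matters because NAPI instances are created with their scheduled bit set and only napi_enable() clears it, so calling napi_disable() on a channel that was never enabled would block indefinitely waiting to claim the instance. The generic form of the pattern, with hypothetical counters:

        /* teardown must mirror how many instances were actually enabled */
        for (i = 0; i < nr_allocated; i++) {
                if (i < nr_enabled)
                        napi_disable(&chans[i].napi);
                netif_napi_del(&chans[i].napi); /* safe for all of them */
        }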
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 4406427d4617..11831a1c9762 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -42,9 +42,13 @@
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
+/* Macros to define the context of vf registration */
+#define VF_REG_IN_PROBE 1
+#define VF_REG_IN_NOTIFIER 2
+
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -2185,7 +2189,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
}
static int netvsc_vf_join(struct net_device *vf_netdev,
- struct net_device *ndev)
+ struct net_device *ndev, int context)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
int ret;
@@ -2208,7 +2212,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
- schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+ /* If this registration is called from probe context, vf_takeover
+ * is taken care of later in probe itself.
+ */
+ if (context == VF_REG_IN_NOTIFIER)
+ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@@ -2346,7 +2354,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
return NOTIFY_DONE;
}
-static int netvsc_register_vf(struct net_device *vf_netdev)
+static int netvsc_register_vf(struct net_device *vf_netdev, int context)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
@@ -2386,7 +2394,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
- if (netvsc_vf_join(vf_netdev, ndev) != 0)
+ if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
return NOTIFY_DONE;
dev_hold(vf_netdev);
@@ -2484,10 +2492,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_OK;
}
+static int check_dev_is_matching_vf(struct net_device *event_ndev)
+{
+ /* Skip NetVSC interfaces */
+ if (event_ndev->netdev_ops == &device_ops)
+ return -ENODEV;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_ndev->type != ARPHRD_ETHER)
+ return -ENODEV;
+
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (is_vlan_dev(event_ndev))
+ return -ENODEV;
+
+ /* Avoid Bonding master dev with same MAC registering as VF */
+ if (netif_is_bond_master(event_ndev))
+ return -ENODEV;
+
+ return 0;
+}
+
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
- struct net_device *net = NULL;
+ struct net_device *net = NULL, *vf_netdev;
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
@@ -2599,6 +2628,30 @@ static int netvsc_probe(struct hv_device *dev,
}
list_add(&net_device_ctx->list, &netvsc_dev_list);
+
+ /* When the hv_netvsc driver is unloaded and reloaded, the
+ * NET_DEVICE_REGISTER for the vf device is replayed before probe
+ * is complete. This is because register_netdevice_notifier() gets
+ * registered before vmbus_driver_register() so that callback func
+ * is set before probe and we don't miss events like NETDEV_POST_INIT.
+ * So, in this section we try to register the matching vf device that
+ * is present as a netdevice, knowing that its register call is not
+ * processed in the netvsc_netdev_notifier (as probing is in progress and
+ * get_netvsc_byslot fails).
+ */
+ for_each_netdev(dev_net(net), vf_netdev) {
+ ret = check_dev_is_matching_vf(vf_netdev);
+ if (ret != 0)
+ continue;
+
+ if (net != get_netvsc_byslot(vf_netdev))
+ continue;
+
+ netvsc_prepare_bonding(vf_netdev);
+ netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+ __netvsc_vf_setup(net, vf_netdev);
+ break;
+ }
rtnl_unlock();
netvsc_devinfo_put(device_info);
@@ -2754,28 +2807,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
- /* Skip our own events */
- if (event_dev->netdev_ops == &device_ops)
- return NOTIFY_DONE;
-
- /* Avoid non-Ethernet type devices */
- if (event_dev->type != ARPHRD_ETHER)
- return NOTIFY_DONE;
-
- /* Avoid Vlan dev with same MAC registering as VF */
- if (is_vlan_dev(event_dev))
- return NOTIFY_DONE;
-
- /* Avoid Bonding master dev with same MAC registering as VF */
- if (netif_is_bond_master(event_dev))
+ ret = check_dev_is_matching_vf(event_dev);
+ if (ret != 0)
return NOTIFY_DONE;
switch (event) {
case NETDEV_POST_INIT:
return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
- return netvsc_register_vf(event_dev);
+ return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
case NETDEV_UNREGISTER:
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
@@ -2807,7 +2849,7 @@ static int __init netvsc_drv_init(void)
pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
- netvsc_ring_bytes = ring_size * PAGE_SIZE;
+ netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
register_netdevice_notifier(&netvsc_netdev_notifier);
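The updated module parameter description makes the unit explicit: ring_size counts 4 KiB pages on every architecture, so the computation now multiplies by a literal 4096 rather than PAGE_SIZE (which is 64 KiB on some arm64 and powerpc configs), and VMBUS_RING_SIZE() then adds the ring-buffer header and rounds the total up to page granularity. For the default of 128:

        /* 128 * 4096 = 512 KiB of payload; the helper pads and aligns it */
        netvsc_ring_bytes = VMBUS_RING_SIZE(128 * 4096);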
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 35e55f198e05..2930141d7dd2 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -259,4 +259,5 @@ static __exit void fake_remove_module(void)
module_init(fakelb_init_module);
module_exit(fake_remove_module);
+MODULE_DESCRIPTION("IEEE 802.15.4 loopback driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 4bc05948f772..a78c692f2d3c 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -212,7 +212,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
u32 unit_count;
u32 unit;
- unit_count = roundup(ipa->endpoint_count, 32);
+ unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
for (unit = 0; unit < unit_count; unit++) {
const struct reg *reg;
u32 val;
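The two helpers differ in kind: roundup(n, 32) returns n rounded up to the next multiple of 32, while DIV_ROUND_UP(n, 32) returns how many 32-wide units cover n, which is what a loop over 32-endpoint register units needs. Worked example:

        /* with ipa->endpoint_count == 40:
         *   roundup(40, 32)      == 64  (a count of endpoints - wrong here)
         *   DIV_ROUND_UP(40, 32) == 2   (number of 32-endpoint registers)
         */
        unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);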
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 60944a4beada..1afc4c47be73 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -237,4 +237,5 @@ static void __exit ipvtap_exit(void)
module_exit(ipvtap_exit);
MODULE_ALIAS_RTNL_LINK("ipvtap");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_DESCRIPTION("IP-VLAN based tap driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index e34816638569..7f5426285c61 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -607,11 +607,26 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
- ret = skb_ensure_writable_head_tail(skb, dev);
- if (unlikely(ret < 0)) {
- macsec_txsa_put(tx_sa);
- kfree_skb(skb);
- return ERR_PTR(ret);
+ if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
+ skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
+ struct sk_buff *nskb = skb_copy_expand(skb,
+ MACSEC_NEEDED_HEADROOM,
+ MACSEC_NEEDED_TAILROOM,
+ GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb) {
+ macsec_txsa_put(tx_sa);
+ return ERR_PTR(-ENOMEM);
+ }
}
unprotected_len = skb->len;
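The restored logic chooses between two ways of making the skb safely writable: if head or tail room is short it copies into a larger buffer, which is private by construction, otherwise it merely unshares the data. Reduced to the decision tree (a sketch; the tx_sa reference drops from the surrounding function are omitted):

        if (skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
            skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM) {
                /* too small: copy-expand into a private buffer */
                struct sk_buff *nskb = skb_copy_expand(skb,
                                                MACSEC_NEEDED_HEADROOM,
                                                MACSEC_NEEDED_TAILROOM,
                                                GFP_ATOMIC);
                if (!nskb) {
                        kfree_skb(skb);
                        return ERR_PTR(-ENOMEM);
                }
                consume_skb(skb);
                skb = nskb;
        } else {
                /* big enough: only make sure the data isn't shared;
                 * skb_unshare() releases the original in either case
                 */
                skb = skb_unshare(skb, GFP_ATOMIC);
                if (!skb)
                        return ERR_PTR(-ENOMEM);
        }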
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b4d3b9cde8bd..92a7a36b93ac 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
trap_report_dw.work);
nsim_dev = nsim_trap_data->nsim_dev;
- /* For each running port and enabled packet trap, generate a UDP
- * packet with a random 5-tuple and report it.
- */
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
return;
}
+ /* For each running port and enabled packet trap, generate a UDP
+ * packet with a random 5-tuple and report it.
+ */
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
index 69b829e6ab35..7fd3377dbd79 100644
--- a/drivers/net/phy/mdio_devres.c
+++ b/drivers/net/phy/mdio_devres.c
@@ -131,4 +131,5 @@ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
EXPORT_SYMBOL(__devm_of_mdiobus_register);
#endif /* CONFIG_OF_MDIO */
+MODULE_DESCRIPTION("Network MDIO bus devres helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek-ge-soc.c
index 8a20d9889f10..0f3a1538a8b8 100644
--- a/drivers/net/phy/mediatek-ge-soc.c
+++ b/drivers/net/phy/mediatek-ge-soc.c
@@ -489,7 +489,7 @@ static int tx_r50_fill_result(struct phy_device *phydev, u16 tx_r50_cal_val,
u16 reg, val;
if (phydev->drv->phy_id == MTK_GPHY_ID_MT7988)
- bias = -2;
+ bias = -1;
val = clamp_val(bias + tx_r50_cal_val, 0, 63);
@@ -705,6 +705,11 @@ restore:
static void mt798x_phy_common_finetune(struct phy_device *phydev)
{
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
+ __phy_write(phydev, 0x11, 0xc71);
+ __phy_write(phydev, 0x12, 0xc);
+ __phy_write(phydev, 0x10, 0x8fae);
+
/* EnabRandUpdTrig = 1 */
__phy_write(phydev, 0x11, 0x2f00);
__phy_write(phydev, 0x12, 0xe);
@@ -715,15 +720,56 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x83aa);
- /* TrFreeze = 0 */
+ /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */
+ __phy_write(phydev, 0x11, 0x240);
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x9680);
+
+ /* TrFreeze = 0 (mt7988 default) */
__phy_write(phydev, 0x11, 0x0);
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x9686);
+ /* SSTrKp100 = 5 */
+ /* SSTrKf100 = 6 */
+ /* SSTrKp1000Mas = 5 */
+ /* SSTrKf1000Mas = 6 */
/* SSTrKp1000Slv = 5 */
+ /* SSTrKf1000Slv = 6 */
__phy_write(phydev, 0x11, 0xbaef);
__phy_write(phydev, 0x12, 0x2e);
__phy_write(phydev, 0x10, 0x968c);
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+}
+
+static void mt7981_phy_finetune(struct phy_device *phydev)
+{
+ u16 val[8] = { 0x01ce, 0x01c1,
+ 0x020f, 0x0202,
+ 0x03d0, 0x03c0,
+ 0x0013, 0x0005 };
+ int i, k;
+
+ /* 100M eye finetune:
+ * Keep middle level of TX MLT3 shapper as default.
+ * Only change TX MLT3 overshoot level here.
+ */
+ for (k = 0, i = 1; i < 12; i++) {
+ if (i % 3 == 0)
+ continue;
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
+ }
+
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ /* ResetSyncOffset = 6 */
+ __phy_write(phydev, 0x11, 0x600);
+ __phy_write(phydev, 0x12, 0x0);
+ __phy_write(phydev, 0x10, 0x8fc0);
+
+ /* VgaDecRate = 1 */
+ __phy_write(phydev, 0x11, 0x4c2a);
+ __phy_write(phydev, 0x12, 0x3e);
+ __phy_write(phydev, 0x10, 0x8fa4);
/* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
* MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
@@ -738,7 +784,7 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
__phy_write(phydev, 0x10, 0x8ec0);
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
- /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9*/
+ /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */
phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0x9));
@@ -771,48 +817,6 @@ static void mt798x_phy_common_finetune(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_LDO_OUTPUT_V, 0x2222);
}
-static void mt7981_phy_finetune(struct phy_device *phydev)
-{
- u16 val[8] = { 0x01ce, 0x01c1,
- 0x020f, 0x0202,
- 0x03d0, 0x03c0,
- 0x0013, 0x0005 };
- int i, k;
-
- /* 100M eye finetune:
- * Keep middle level of TX MLT3 shapper as default.
- * Only change TX MLT3 overshoot level here.
- */
- for (k = 0, i = 1; i < 12; i++) {
- if (i % 3 == 0)
- continue;
- phy_write_mmd(phydev, MDIO_MMD_VEND1, i, val[k++]);
- }
-
- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
- __phy_write(phydev, 0x11, 0xc71);
- __phy_write(phydev, 0x12, 0xc);
- __phy_write(phydev, 0x10, 0x8fae);
-
- /* ResetSyncOffset = 6 */
- __phy_write(phydev, 0x11, 0x600);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
-
- /* VgaDecRate = 1 */
- __phy_write(phydev, 0x11, 0x4c2a);
- __phy_write(phydev, 0x12, 0x3e);
- __phy_write(phydev, 0x10, 0x8fa4);
-
- /* FfeUpdGainForce = 4 */
- __phy_write(phydev, 0x11, 0x240);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9680);
-
- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
-}
-
static void mt7988_phy_finetune(struct phy_device *phydev)
{
u16 val[12] = { 0x0187, 0x01cd, 0x01c8, 0x0182,
@@ -827,17 +831,7 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
/* TCT finetune */
phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5);
- /* Disable TX power saving */
- phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
- MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
-
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
-
- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 12 */
- __phy_write(phydev, 0x11, 0x671);
- __phy_write(phydev, 0x12, 0xc);
- __phy_write(phydev, 0x10, 0x8fae);
-
/* ResetSyncOffset = 5 */
__phy_write(phydev, 0x11, 0x500);
__phy_write(phydev, 0x12, 0x0);
@@ -845,13 +839,27 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
/* VgaDecRate is 1 at default on mt7988 */
- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+ /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7,
+ * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7
+ */
+ __phy_write(phydev, 0x11, 0xb90a);
+ __phy_write(phydev, 0x12, 0x6f);
+ __phy_write(phydev, 0x10, 0x8f82);
+
+ /* RemAckCntLimitCtrl = 1 */
+ __phy_write(phydev, 0x11, 0xfbba);
+ __phy_write(phydev, 0x12, 0xc3);
+ __phy_write(phydev, 0x10, 0x87f8);
- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_2A30);
- /* TxClkOffset = 2 */
- __phy_modify(phydev, MTK_PHY_ANARG_RG, MTK_PHY_TCLKOFFSET_MASK,
- FIELD_PREP(MTK_PHY_TCLKOFFSET_MASK, 0x2));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+
+ /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG234,
+ MTK_PHY_TR_OPEN_LOOP_EN_MASK | MTK_PHY_LPF_X_AVERAGE_MASK,
+ BIT(0) | FIELD_PREP(MTK_PHY_LPF_X_AVERAGE_MASK, 0xa));
+
+ /* rg_tr_lpf_cnt_val = 1023 */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_LPF_CNT_VAL, 0x3ff);
}
static void mt798x_phy_eee(struct phy_device *phydev)
@@ -884,11 +892,11 @@ static void mt798x_phy_eee(struct phy_device *phydev)
MTK_PHY_LPI_SLV_SEND_TX_EN,
FIELD_PREP(MTK_PHY_LPI_SLV_SEND_TX_TIMER_MASK, 0x120));
- phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
- MTK_PHY_LPI_SEND_LOC_TIMER_MASK |
- MTK_PHY_LPI_TXPCS_LOC_RCV,
- FIELD_PREP(MTK_PHY_LPI_SEND_LOC_TIMER_MASK, 0x117));
+ /* Keep MTK_PHY_LPI_SEND_LOC_TIMER as 375 */
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG239,
+ MTK_PHY_LPI_TXPCS_LOC_RCV);
+ /* This also fixes some IoT issues, such as CH340 */
phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_DEV1E_REG2C7,
MTK_PHY_MAX_GAIN_MASK | MTK_PHY_MIN_GAIN_MASK,
FIELD_PREP(MTK_PHY_MAX_GAIN_MASK, 0x8) |
@@ -922,7 +930,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x9690);
- /* REG_EEE_st2TrKf1000 = 3 */
+ /* REG_EEE_st2TrKf1000 = 2 */
__phy_write(phydev, 0x11, 0x114f);
__phy_write(phydev, 0x12, 0x2);
__phy_write(phydev, 0x10, 0x969a);
@@ -947,7 +955,7 @@ static void mt798x_phy_eee(struct phy_device *phydev)
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x96b8);
- /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 1 */
+ /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */
__phy_write(phydev, 0x11, 0x1463);
__phy_write(phydev, 0x12, 0x0);
__phy_write(phydev, 0x10, 0x96ca);
@@ -1459,6 +1467,13 @@ static int mt7988_phy_probe(struct phy_device *phydev)
if (err)
return err;
+ /* Disable TX power saving at probing to:
+ * 1. Meet common mode compliance test criteria
+ * 2. Make sure that TX-VCM calibration works fine
+ */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
+ MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3 << 8);
+
return mt798x_phy_calibration(phydev);
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 81c20eb4b54b..dad720138baa 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -120,6 +120,11 @@
*/
#define LAN8814_1PPM_FORMAT 17179
+#define PTP_RX_VERSION 0x0248
+#define PTP_TX_VERSION 0x0288
+#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
+#define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
+
#define PTP_RX_MOD 0x024F
#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
#define PTP_RX_TIMESTAMP_EN 0x024D
@@ -3150,6 +3155,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+ /* Disable checking for minorVersionPTP field */
+ lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+ lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+ PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+
skb_queue_head_init(&ptp_priv->tx_queue);
skb_queue_head_init(&ptp_priv->rx_queue);
INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
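
The two version registers added above hold an accepted PTP version range: bits 15:8 carry the maximum accepted versionPTP, bits 7:0 the minimum. Writing max = 0xff and min = 0x0 accepts every version, which is what effectively disables the minorVersionPTP check. A minimal standalone sketch of how the written value is composed (GENMASK here is a local stand-in for the kernel macro, not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	#define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))
	#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
	#define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))

	int main(void)
	{
		/* Accept any versionPTP from 0x0 up to 0xff. */
		uint16_t val = PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0);

		printf("0x%04x\n", val); /* prints 0xff00 */
		return 0;
	}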
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 894172a3e15f..337899c69738 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -421,9 +421,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
ERR_PTR(ret));
return ret;
}
+
+ return genphy_soft_reset(phydev);
}
- return genphy_soft_reset(phydev);
+ return 0;
}
static int rtl821x_suspend(struct phy_device *phydev)
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 40ce8abe6999..cc7d1113ece0 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1437,4 +1437,5 @@ static int __init plip_init (void)
module_init(plip_init);
module_exit(plip_cleanup_module);
+MODULE_DESCRIPTION("PLIP (parallel port) network module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index db0dc36d12e3..55954594e157 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -1166,5 +1166,6 @@ static void __exit bsdcomp_cleanup(void)
module_init(bsdcomp_init);
module_exit(bsdcomp_cleanup);
+MODULE_DESCRIPTION("PPP BSD-Compress compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 840da924708b..c33c3db3cc08 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -87,6 +87,7 @@ struct asyncppp {
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
+MODULE_DESCRIPTION("PPP async serial channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
@@ -460,6 +461,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, p))
break;
+ if (val > U16_MAX) {
+ err = -EINVAL;
+ break;
+ }
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
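
The new upper bound rejects unreasonably large user-supplied MRUs before they can reach the receive-buffer allocation path. A standalone sketch of the resulting accept/clamp/reject policy (the constants mirror the kernel's PPP_MRU = 1500 and U16_MAX):

	#include <stdio.h>

	#define PPP_MRU 1500   /* default MRU, as in ppp_defs.h */
	#define U16_MAX 0xffff

	/* Returns the MRU that would be stored, or -1 for -EINVAL. */
	static long set_mru(long val)
	{
		if (val > U16_MAX)
			return -1;      /* rejected: unreasonably large */
		if (val < PPP_MRU)
			val = PPP_MRU;  /* raised to the minimum */
		return val;
	}

	int main(void)
	{
		printf("%ld %ld %ld\n",
		       set_mru(70000), set_mru(100), set_mru(9000));
		/* prints: -1 1500 9000 */
		return 0;
	}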
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index e6d48e5c65a3..4d2ff63f2ee2 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -630,6 +630,7 @@ static void __exit deflate_cleanup(void)
module_init(deflate_init);
module_exit(deflate_cleanup);
+MODULE_DESCRIPTION("PPP Deflate compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0193af2d31c9..3dd52bf28f15 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3604,6 +3604,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+MODULE_DESCRIPTION("Generic PPP layer driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 52d05ce4a281..45bf59ac8f57 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -724,5 +724,6 @@ ppp_sync_cleanup(void)
module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
+MODULE_DESCRIPTION("PPP synchronous TTY channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 8e7238e97d0a..2ea4f4890d23 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
struct sk_buff *skb;
int error = 0;
- if (sk->sk_state & PPPOX_BOUND) {
- error = -EIO;
- goto end;
- }
+ if (sk->sk_state & PPPOX_BOUND)
+ return -EIO;
skb = skb_recv_datagram(sk, flags, &error);
- if (error < 0)
- goto end;
+ if (!skb)
+ return error;
- if (skb) {
- total_len = min_t(size_t, total_len, skb->len);
- error = skb_copy_datagram_msg(skb, 0, m, total_len);
- if (error == 0) {
- consume_skb(skb);
- return total_len;
- }
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_msg(skb, 0, m, total_len);
+ if (error == 0) {
+ consume_skb(skb);
+ return total_len;
}
kfree_skb(skb);
-end:
return error;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index afa5497f7c35..8f95a562b8d0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ ntfile->xdp_rxq.queue_index = index;
rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
NULL);
@@ -1630,13 +1631,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
switch (act) {
case XDP_REDIRECT:
err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
- if (err)
+ if (err) {
+ dev_core_stats_rx_dropped_inc(tun->dev);
return err;
+ }
+ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_TX:
err = tun_xdp_tx(tun->dev, xdp);
- if (err < 0)
+ if (err < 0) {
+ dev_core_stats_rx_dropped_inc(tun->dev);
return err;
+ }
+ dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_PASS:
break;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 99ec1d4a972d..8b6d6a1b3c2e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
err = dm_read_shared_word(dev, 1, loc, &res);
if (err < 0) {
netdev_err(dev->net, "MDIO read error: %d\n", err);
- return err;
+ return 0;
}
netdev_dbg(dev->net,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a6d653ff552a..ba6c8ac2a736 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
lan78xx_rx_urb_submit_all(dev);
+ local_bh_disable();
napi_schedule(&dev->napi);
+ local_bh_enable();
}
return 0;
@@ -3033,7 +3035,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+ dev->chipid == ID_REV_CHIP_ID_7850_) {
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
if (!ret && sig != EEPROM_INDICATOR) {
/* Implies there is no external eeprom. Set mac speed */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index a530f20ee257..2fa46baa589e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2105,6 +2105,11 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long) &smsc95xx_info,
},
{
+ /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
+ USB_DEVICE(0x0878, 0x1400),
+ .driver_info = (unsigned long)&smsc95xx_info,
+ },
+ {
/* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
USB_DEVICE(0x184F, 0x0051),
.driver_info = (unsigned long)&smsc95xx_info,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 578e36ea1589..cd4a6fe458f9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1208,14 +1208,6 @@ static int veth_enable_xdp(struct net_device *dev)
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
return err;
}
-
- if (!veth_gro_requested(dev)) {
- /* user-space did not require GRO, but adding XDP
- * is supposed to get GRO working
- */
- dev->features |= NETIF_F_GRO;
- netdev_features_change(dev);
- }
}
}
@@ -1235,18 +1227,9 @@ static void veth_disable_xdp(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++)
rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
- if (!netif_running(dev) || !veth_gro_requested(dev)) {
+ if (!netif_running(dev) || !veth_gro_requested(dev))
veth_napi_del(dev);
- /* if user-space did not require GRO, since adding XDP
- * enabled it, clear it now
- */
- if (!veth_gro_requested(dev) && netif_running(dev)) {
- dev->features &= ~NETIF_F_GRO;
- netdev_features_change(dev);
- }
- }
-
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
@@ -1478,7 +1461,8 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
int i;
- priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
+ priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!priv->rq)
return -ENOMEM;
@@ -1494,7 +1478,7 @@ static void veth_free_queues(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
- kfree(priv->rq);
+ kvfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
@@ -1654,6 +1638,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
+ if (!veth_gro_requested(dev)) {
+ /* user-space did not require GRO, but adding
+ * XDP is supposed to get GRO working
+ */
+ dev->features |= NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@@ -1669,6 +1661,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
+ /* if user-space did not require GRO, clear it now, since
+ * adding XDP is what enabled it
+ */
+ if (!veth_gro_requested(dev)) {
+ dev->features &= ~NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
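
The veth change moves the forced-GRO toggling out of the XDP enable/disable helpers into veth_xdp_set(), tying it to program attach and detach rather than to the device being up. A simplified state sketch under the assumption of a single boolean per feature (not the driver's real data structures):

	#include <stdbool.h>
	#include <stdio.h>

	static bool gro_requested; /* what user-space configured */
	static bool gro_active;    /* what the device reports */

	static void xdp_prog_attach(void)
	{
		/* adding XDP is supposed to get GRO working */
		if (!gro_requested)
			gro_active = true;
	}

	static void xdp_prog_detach(void)
	{
		/* restore user-space's original choice */
		if (!gro_requested)
			gro_active = false;
	}

	int main(void)
	{
		xdp_prog_attach();
		printf("gro=%d\n", gro_active); /* 1 */
		xdp_prog_detach();
		printf("gro=%d\n", gro_active); /* 0 */
		return 0;
	}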
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 43e0db78d42b..a742cec44e3d 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1803,5 +1803,6 @@ static struct usb_driver ar5523_driver = {
module_usb_driver(ar5523_driver);
+MODULE_DESCRIPTION("Atheros AR5523 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 7e3b6779f4e9..02e160d831be 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -368,10 +368,6 @@ struct ath11k_vif {
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
-
-#ifdef CONFIG_ATH11K_DEBUGFS
- struct dentry *debugfs_twt;
-#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index a847bc0d50c0..a48e737ef35d 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1894,35 +1894,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
.open = simple_open
};
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_base *ab = arvif->ar->ab;
+ struct dentry *debugfs_twt;
if (arvif->vif->type != NL80211_IFTYPE_AP &&
!(arvif->vif->type == NL80211_IFTYPE_STATION &&
test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
return;
- arvif->debugfs_twt = debugfs_create_dir("twt",
- arvif->vif->debugfs_dir);
- debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ debugfs_create_file("add_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_add_dialog);
- debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ debugfs_create_file("del_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_del_dialog);
- debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ debugfs_create_file("pause_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_pause_dialog);
- debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ debugfs_create_file("resume_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_resume_dialog);
}
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
- if (!arvif->debugfs_twt)
- return;
-
- debugfs_remove_recursive(arvif->debugfs_twt);
- arvif->debugfs_twt = NULL;
-}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 44d15845f39a..a39e458637b0 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -307,8 +307,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
@@ -387,14 +387,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
-static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
-{
-}
-
-static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-}
-
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index db241589424d..b13525bbbb80 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -6756,13 +6756,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
- /* In the case of hardware recovery, debugfs files are
- * not deleted since ieee80211_ops.remove_interface() is
- * not invoked. In such cases, try to delete the files.
- * These will be re-created later.
- */
- ath11k_debugfs_remove_interface(arvif);
-
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
@@ -6939,8 +6932,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
- ath11k_debugfs_add_interface(arvif);
-
if (vif->type != NL80211_IFTYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_vdev_create(ar);
@@ -7056,8 +7047,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- ath11k_debugfs_remove_interface(arvif);
-
/* TODO: recalc traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@@ -9153,6 +9142,7 @@ static const struct ieee80211_ops ath11k_ops = {
#endif
#ifdef CONFIG_ATH11K_DEBUGFS
+ .vif_add_debugfs = ath11k_debugfs_op_vif_add,
.sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 41119fb177e3..4e6b4df8562f 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1685,6 +1685,7 @@ static struct platform_driver wcn36xx_driver = {
module_platform_driver(wcn36xx_driver);
+MODULE_DESCRIPTION("Qualcomm Atheros WCN3660/3680 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
index d55f3271d619..4f0c1e1a8e60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_bca_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_BCA, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom AP chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 133c5ea6429c..28d6a30cc010 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -3779,8 +3779,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
if (req->channels[i] == chan)
break;
}
- if (i == req->n_channels)
- req->channels[req->n_channels++] = chan;
+ if (i == req->n_channels) {
+ req->n_channels++;
+ req->channels[i] = chan;
+ }
for (i = 0; i < req->n_ssids; i++) {
if (req->ssids[i].ssid_len == ssid_len &&
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
index f82fbbe3ecef..90d06cda03a2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_cyw_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_CYW, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Cypress/Infineon chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
index 02918d434556..b66135e3cff4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_wcc_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_WCC, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom mobility chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index b96f30d11644..dcc4810cb324 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -618,7 +618,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -634,7 +634,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -650,7 +650,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -707,7 +707,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -723,7 +723,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -739,7 +739,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -1116,6 +1116,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto read_table;
}
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
read_table:
fwrt->ppag_ver = tbl_rev;
flags = &wifi_pkg->package.elements[1];
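
The repeated PTR_ERR() fixes above all correct the same bug class: inside a !IS_ERR() branch the pointer is valid, so PTR_ERR() returns the pointer value itself rather than an errno, and the caller sees garbage instead of a failure code. A standalone illustration with local stand-ins for the kernel's err-pointer helpers:

	#include <stdio.h>

	#define MAX_ERRNO 4095
	#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}

	int main(void)
	{
		int obj;
		void *ok = &obj; /* a valid pointer: IS_ERR(ok) == 0 */

		if (!IS_ERR(ok)) {
			/* pre-fix pattern: a bogus "error" (the address) */
			printf("PTR_ERR on valid ptr: %ld\n", PTR_ERR(ok));
			/* post-fix pattern: a real errno */
			printf("fixed: %d\n", -22 /* -EINVAL */);
		}
		return 0;
	}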
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 798731ecbefd..b740c65a7dca 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -537,7 +537,7 @@ enum iwl_fw_dbg_config_cmd_type {
}; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */
/* this token disables debug asserts in the firmware */
-#define IWL_FW_DBG_CONFIG_TOKEN 0x00011301
+#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001
/**
* struct iwl_fw_dbg_config_cmd - configure FW debug
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 9c69d3674384..e6c0f928a6bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo {
IWL_GEN2_TRIG_TX_FIFO_VO,
};
+enum iwl_bz_tx_fifo {
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+};
/**
* enum iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index e27774e7ed74..80fda056e46a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -19,7 +19,6 @@
* @fwrt_ptr: pointer to the buffer coming from fwrt
* @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
* transport's data.
- * @trans_len: length of the valid data in trans_ptr
* @fwrt_len: length of the valid data in fwrt_ptr
*/
struct iwl_fw_dump_ptrs {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 3b14f6476743..72075720969c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -1096,7 +1096,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
node_trig = (void *)node_tlv->data;
}
- memcpy(node_trig->data + offset, trig->data, trig_data_len);
+ memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
node_tlv->length = cpu_to_le32(size);
if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ffe2670720c9..abf8001bdac1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.ucode_capa.cmd_versions);
kfree(drv->fw.phy_integration_ver);
kfree(drv->trans->dbg.pc_data);
+ drv->trans->dbg.pc_data = NULL;
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 402896988686..2f6774ec37b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -668,7 +668,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 |
@@ -793,7 +792,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
@@ -1020,8 +1018,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iftype_data->eht_cap.has_eht) {
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
- ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
- IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4582afb149d7..05b64176859e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1279,7 +1279,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = 0,
+ };
wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
@@ -1291,6 +1293,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
goto out_noreset;
}
+ ret = iwl_mvm_sta_ensure_queue(
+ mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ goto out_noreset;
+
ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
vif, mvmvif, ap_sta);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index c4f96125cf33..25a5a31e63c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_TRIG_TX_FIFO_BK,
};
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
struct iwl_mvm_mac_iface_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 7f13dff04b26..53e26c3c3a9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1600,7 +1600,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
*/
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC) {
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
ret = 0;
goto out;
}
@@ -1640,7 +1641,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef);
}
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -3685,6 +3687,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
NL80211_TDLS_SETUP);
}
+ if (ret)
+ return ret;
+
for_each_sta_active_link(vif, sta, link_sta, i)
link_sta->agg.max_rc_amsdu_len = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 61170173f917..893b69fc841b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -81,7 +81,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
}
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -437,6 +438,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
mvmvif->ap_ibss_active = false;
}
+ iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE, false);
+
if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
@@ -448,9 +452,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_MONITOR)
iwl_mvm_mld_rm_snif_sta(mvm, vif);
- iwl_mvm_link_changed(mvm, vif, link_conf,
- LINK_CONTEXT_MODIFY_ACTIVE, false);
-
if (switching_chanctx)
return;
mvmvif->link[link_id]->phy_ctxt = NULL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 40627961b834..81dbef6947f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1581,12 +1581,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- return iwl_mvm_has_new_tx_api(mvm) ?
- iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_ac_to_bz_tx_fifo[ac];
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+ return iwl_mvm_ac_to_tx_fifo[ac];
}
struct iwl_rate_info {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 886d00098528..af15d470c69b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -505,6 +505,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mvm_sta->dup_data))
+ return false;
+
dup_data = &mvm_sta->dup_data[queue];
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 2a3ca9785974..c2e0cff740e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1502,6 +1502,34 @@ out_err:
return ret;
}
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ return 0;
+ }
+
+ if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ ret = 0;
+ }
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ if (!list_empty(&mvmtxq->list))
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+ local_bh_enable();
+
+ return ret;
+}
+
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b33a0ce096d4..3cf8a70274ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -571,6 +571,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 218fdf1ed530..2e653a417d62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -972,6 +972,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+ mvmvif->time_event_data.link_id = -1;
iwl_mvm_p2p_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index db986bfc4dc3..461f26d9214e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -520,13 +520,24 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
}
}
+static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *out_hdr = cmd;
+
+ memcpy(cmd, hdr, hdrlen);
+ if (addr3_override)
+ memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
+}
+
/*
* Allocates the Tx cmd and sets the driver data pointers in the skb
*/
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
- struct ieee80211_sta *sta, u8 sta_id)
+ struct ieee80211_sta *sta, u8 sta_id,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_tx_cmd *dev_cmd;
@@ -584,7 +595,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le16(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -599,7 +610,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le32(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -617,7 +628,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
out:
return dev_cmd;
@@ -820,7 +831,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
+ NULL);
if (!dev_cmd)
return -1;
@@ -1140,7 +1152,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
*/
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
@@ -1172,7 +1185,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_probe_resp_set_noa(mvm, skb);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
- sta, mvmsta->deflink.sta_id);
+ sta, mvmsta->deflink.sta_id,
+ addr3_override);
if (!dev_cmd)
goto drop;
@@ -1294,9 +1308,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
+ struct ieee80211_vif *vif;
unsigned int payload_len;
int ret;
struct sk_buff *orig_skb = skb;
+ const u8 *addr3;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1307,26 +1323,59 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
memcpy(&info, skb->cb, sizeof(info));
if (!skb_is_gso(skb))
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
if (payload_len <= skb_shinfo(skb)->gso_size)
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
__skb_queue_head_init(&mpdus_skbs);
+ vif = info.control.vif;
+ if (!vif)
+ return -1;
+
ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
if (ret)
return ret;
WARN_ON(skb_queue_empty(&mpdus_skbs));
+ /*
+ * As described in IEEE Std 802.11-2020, table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
+ * in the command header. We need to preserve the original
+ * address 3 in the skb header to correctly create all the
+ * A-MSDU subframe headers from it.
+ */
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ addr3 = vif->cfg.ap_addr;
+ break;
+ case NL80211_IFTYPE_AP:
+ addr3 = vif->addr;
+ break;
+ default:
+ addr3 = NULL;
+ break;
+ }
+
while (!skb_queue_empty(&mpdus_skbs)) {
+ struct ieee80211_hdr *hdr;
+ bool amsdu;
+
skb = __skb_dequeue(&mpdus_skbs);
+ hdr = (void *)skb->data;
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
+ amsdu ? addr3 : NULL);
if (ret) {
/* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
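
A compact sketch of the address-3 selection the comment above describes, assuming only the two interface types the patch handles; anything else keeps the frame's original addr3 (addr3_override == NULL):

	#include <stdio.h>

	enum iftype { IFTYPE_STATION, IFTYPE_AP, IFTYPE_OTHER };

	static const unsigned char *pick_addr3(enum iftype type,
					       const unsigned char *ap_addr,
					       const unsigned char *own_addr)
	{
		switch (type) {
		case IFTYPE_STATION:
			return ap_addr;  /* BSSID of the associated AP */
		case IFTYPE_AP:
			return own_addr; /* we are the BSSID */
		default:
			return NULL;     /* leave the header untouched */
		}
	}

	int main(void)
	{
		static const unsigned char ap[6], own[6];

		printf("station -> %s\n",
		       pick_addr3(IFTYPE_STATION, ap, own) == ap ?
		       "ap_addr" : "?");
		printf("ap      -> %s\n",
		       pick_addr3(IFTYPE_AP, ap, own) == own ?
		       "own addr" : "?");
		printf("other   -> %s\n",
		       pick_addr3(IFTYPE_OTHER, ap, own) ?
		       "?" : "frame addr3 kept");
		return 0;
	}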
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index b52cce38115d..c4fe70e05b9b 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
"FW rev %s - Softmac protocol %x.%x\n",
fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
- "%s - %x.%x", fw_version,
+ "%.19s - %x.%x", fw_version,
priv->fw_var >> 8, priv->fw_var & 0xff);
}
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ce0179b8ab36..0073b5e0f9c9 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -700,6 +700,7 @@ static struct spi_driver p54spi_driver = {
module_spi_driver(p54spi_driver);
+MODULE_DESCRIPTION("Prism54 SPI wireless driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 89d738deea62..e2146d30e553 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -728,6 +728,7 @@ const struct ieee80211_ops mt7603_ops = {
.set_sar_specs = mt7603_set_sar_specs,
};
+MODULE_DESCRIPTION("MediaTek MT7603E and MT76x8 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static int __init mt7603_init(void)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index dab16b5fc386..0971c164b57e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1375,4 +1375,5 @@ const struct ieee80211_ops mt7615_ops = {
};
EXPORT_SYMBOL_GPL(mt7615_ops);
+MODULE_DESCRIPTION("MediaTek MT7615E and MT7663E wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index ac036a072439..87a956ea3ad7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -270,4 +270,5 @@ static void __exit mt7615_exit(void)
module_init(mt7615_init);
module_exit(mt7615_exit);
+MODULE_DESCRIPTION("MediaTek MT7615E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 67cedd2555f9..9692890ba51b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -253,4 +253,5 @@ module_sdio_driver(mt7663s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663S (SDIO) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 04963b9f7498..df737e1ff27b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -281,4 +281,5 @@ module_usb_driver(mt7663u_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 0052d103e276..820b39590027 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -349,4 +349,5 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7663 SDIO/USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 96494ba2fdf7..3a20ba0d2492 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -3160,4 +3160,5 @@ exit:
EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x connac layer helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index c3a392a1a659..bcd24c9072ec 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -342,4 +342,5 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
return 0;
}
+MODULE_DESCRIPTION("MediaTek MT76x EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 9277ff38b7a2..293e66fa83d5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -302,6 +302,7 @@ static const struct pci_device_id mt76x0e_device_table[] = {
MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7650E_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76x0e_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0422c332354a..dd042949cf82 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -336,6 +336,7 @@ err:
MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7610U_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver");
MODULE_LICENSE("GPL");
static struct usb_driver mt76x0_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 02da543dfc5c..b2cc44914294 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -293,4 +293,5 @@ void mt76x02u_init_mcu(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x02 MCU helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 8a0e8124b894..8020446be37b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -696,4 +696,5 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
+MODULE_DESCRIPTION("MediaTek MT76x02 helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 8c01855885ce..1fe5f5a02f93 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -506,4 +506,5 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+MODULE_DESCRIPTION("MediaTek MT76x2 EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index df85ebc6e1df..30959746e924 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -165,6 +165,7 @@ mt76x2e_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
MODULE_FIRMWARE(MT7662_FIRMWARE);
MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_DESCRIPTION("MediaTek MT76x2E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76pci_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 55068f3252ef..ca78e14251c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -147,4 +147,5 @@ static struct usb_driver mt76x2u_driver = {
module_usb_driver(mt76x2u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x2U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index aff4f21e843d..3039f53e2245 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -958,4 +958,5 @@ static void __exit mt7915_exit(void)
module_init(mt7915_init);
module_exit(mt7915_exit);
+MODULE_DESCRIPTION("MediaTek MT7915E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 0645417e0582..0d5adc5ddae3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -1418,5 +1418,6 @@ const struct ieee80211_ops mt7921_ops = {
};
EXPORT_SYMBOL_GPL(mt7921_ops);
+MODULE_DESCRIPTION("MediaTek MT7921 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 57903c6e4f11..dde26f327478 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -544,4 +544,5 @@ MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
MODULE_FIRMWARE(MT7922_ROM_PATCH);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 7591e54d2897..a9ce1e746b95 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -323,5 +323,6 @@ static struct sdio_driver mt7921s_driver = {
.drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
+MODULE_DESCRIPTION("MediaTek MT7921S (SDIO) wireless driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index e5258c74fc07..8b7c03c47598 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -336,5 +336,6 @@ static struct usb_driver mt7921u_driver = {
};
module_usb_driver(mt7921u_driver);
+MODULE_DESCRIPTION("MediaTek MT7921U (USB) wireless driver");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 8f1075da4903..125a1be3cb64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -1450,4 +1450,5 @@ const struct ieee80211_ops mt7925_ops = {
EXPORT_SYMBOL_GPL(mt7925_ops);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7925 core driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 734f31ee40d3..1fd99a856541 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -583,4 +583,5 @@ MODULE_FIRMWARE(MT7925_FIRMWARE_WM);
MODULE_FIRMWARE(MT7925_ROM_PATCH);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 9b885c5b3ed5..1e0f094fc905 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -329,4 +329,5 @@ static struct usb_driver mt7925u_driver = {
module_usb_driver(mt7925u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 502be22dbe36..c42101aa9e45 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -862,5 +862,6 @@ int mt792x_load_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792x_load_firmware);
+MODULE_DESCRIPTION("MediaTek MT792x core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index 2dd283caed36..589a3efb9f8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -314,5 +314,6 @@ void mt792xu_disconnect(struct usb_interface *usb_intf)
}
EXPORT_SYMBOL_GPL(mt792xu_disconnect);
+MODULE_DESCRIPTION("MediaTek MT792x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 3c729b563edc..699be57309c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -4477,7 +4477,8 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
skb_put_data(skb, &req, sizeof(req));
/* cck and ofdm */
- skb_put_data(skb, &la.cck, sizeof(la.cck) + sizeof(la.ofdm));
+ skb_put_data(skb, &la.cck, sizeof(la.cck));
+ skb_put_data(skb, &la.ofdm, sizeof(la.ofdm));
/* ht20 */
skb_put_data(skb, &la.mcs[0], 8);
/* ht40 */
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index c50d89a445e9..9f2abfa273c9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -650,4 +650,5 @@ static void __exit mt7996_exit(void)
module_init(mt7996_init);
module_exit(mt7996_exit);
+MODULE_DESCRIPTION("MediaTek MT7996 MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index c52d550f0c32..3e88798df017 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -672,4 +672,5 @@ EXPORT_SYMBOL_GPL(mt76s_init);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 1584665fe3cb..5a0bcb5071bd 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1128,4 +1128,5 @@ int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index fc76c66ff1a5..d6c01a2dd198 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -138,4 +138,5 @@ int __mt76_worker_fn(void *ptr)
}
EXPORT_SYMBOL_GPL(__mt76_worker_fn);
+MODULE_DESCRIPTION("MediaTek MT76x helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 91d71e0f7ef2..81e8f25863f5 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -1018,5 +1018,6 @@ unregister_netdev:
return ERR_PTR(ret);
}
+MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 0d13e3e46e98..d6d394693090 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -984,4 +984,5 @@ static struct sdio_driver wilc_sdio_driver = {
module_driver(wilc_sdio_driver,
sdio_register_driver,
sdio_unregister_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 77b4cdff73c3..1d8b241ce43c 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -273,6 +273,7 @@ static struct spi_driver wilc_spi_driver = {
.remove = wilc_bus_remove,
};
module_spi_driver(wilc_spi_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SPI wireless driver");
MODULE_LICENSE("GPL");
static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 301bd0043a43..4e5b351f80f0 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -343,5 +343,6 @@ static void __exit wl1251_sdio_exit(void)
module_init(wl1251_sdio_init);
module_exit(wl1251_sdio_exit);
+MODULE_DESCRIPTION("TI WL1251 SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 29292f06bd3d..1936bb3af54a 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -342,6 +342,7 @@ static struct spi_driver wl1251_spi_driver = {
module_spi_driver(wl1251_spi_driver);
+MODULE_DESCRIPTION("TI WL1251 SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index de045fe4ca1e..b26d42b4e3cc 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1955,6 +1955,7 @@ module_param_named(tcxo, tcxo_param, charp, 0);
MODULE_PARM_DESC(tcxo,
"TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
+MODULE_DESCRIPTION("TI WL12xx wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 20d9181b3410..2ccac1cdec01 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -2086,6 +2086,7 @@ module_param_named(num_rx_desc, num_rx_desc_param, int, 0400);
MODULE_PARM_DESC(num_rx_desc_param,
"Number of Rx descriptors: u8 (default is 32)");
+MODULE_DESCRIPTION("TI WiLink 8 wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index fb9ed97774c7..5736acb4d206 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6793,6 +6793,7 @@ MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
+MODULE_DESCRIPTION("TI WLAN core driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index f0686635db46..eb5482ed76ae 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -447,6 +447,7 @@ module_sdio_driver(wl1271_sdio_driver);
module_param(dump, bool, 0600);
MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
+MODULE_DESCRIPTION("TI WLAN SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 7d9a139db59e..0aa2b2f3c5c9 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -562,6 +562,7 @@ static struct spi_driver wl1271_spi_driver = {
};
module_spi_driver(wl1271_spi_driver);
+MODULE_DESCRIPTION("TI WLAN SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 88f760a7cbc3..ef76850d9bcd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,13 +104,12 @@ bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
- u8 status);
+ s8 status);
static void make_tx_response(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp,
+ const struct xen_netif_tx_request *txp,
unsigned int extra_count,
- s8 st);
-static void push_tx_responses(struct xenvif_queue *queue);
+ s8 status);
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned int extra_count, RING_IDX end)
{
RING_IDX cons = queue->tx.req_cons;
- unsigned long flags;
do {
- spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
- push_tx_responses(queue);
- spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
RING_COPY_REQUEST(&queue->tx, cons++, txp);
@@ -463,12 +458,20 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
}
for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
- shinfo->nr_frags++, gop++, nr_slots--) {
+ nr_slots--) {
+ if (unlikely(!txp->size)) {
+ make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+ ++txp;
+ continue;
+ }
+
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp,
txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;
if (txp == first)
txp = txfrags;
@@ -481,20 +484,33 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
- for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
- shinfo->nr_frags++, txp++, gop++) {
+ for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+ if (unlikely(!txp->size)) {
+ make_tx_response(queue, txp, 0,
+ XEN_NETIF_RSP_OKAY);
+ continue;
+ }
+
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
+ ++shinfo->nr_frags;
+ ++gop;
+ }
+
+ if (shinfo->nr_frags) {
+ skb_shinfo(skb)->frag_list = nskb;
+ nskb = NULL;
}
+ }
- skb_shinfo(skb)->frag_list = nskb;
- } else if (nskb) {
+ if (nskb) {
/* A frag_list skb was allocated but it is no longer needed
- * because enough slots were converted to copy ops above.
+ * because enough slots were converted to copy ops above or some
+ * were empty.
*/
kfree_skb(nskb);
}
@@ -963,7 +979,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
(ret == 0) ?
XEN_NETIF_RSP_OKAY :
XEN_NETIF_RSP_ERROR);
- push_tx_responses(queue);
continue;
}
@@ -975,7 +990,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
make_tx_response(queue, &txreq, extra_count,
XEN_NETIF_RSP_OKAY);
- push_tx_responses(queue);
continue;
}
@@ -1401,8 +1415,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
return work_done;
}
+static void _make_tx_response(struct xenvif_queue *queue,
+ const struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+ s8 status)
+{
+ RING_IDX i = queue->tx.rsp_prod_pvt;
+ struct xen_netif_tx_response *resp;
+
+ resp = RING_GET_RESPONSE(&queue->tx, i);
+ resp->id = txp->id;
+ resp->status = status;
+
+ while (extra_count-- != 0)
+ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+
+ queue->tx.rsp_prod_pvt = ++i;
+}
+
+static void push_tx_responses(struct xenvif_queue *queue)
+{
+ int notify;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+}
+
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
- u8 status)
+ s8 status)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
@@ -1412,8 +1453,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_lock_irqsave(&queue->response_lock, flags);
- make_tx_response(queue, &pending_tx_info->req,
- pending_tx_info->extra_count, status);
+ _make_tx_response(queue, &pending_tx_info->req,
+ pending_tx_info->extra_count, status);
/* Release the pending index before pushing the Tx response so
 * it's available before a new Tx request is pushed by the
@@ -1427,32 +1468,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_unlock_irqrestore(&queue->response_lock, flags);
}
-
static void make_tx_response(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp,
+ const struct xen_netif_tx_request *txp,
unsigned int extra_count,
- s8 st)
+ s8 status)
{
- RING_IDX i = queue->tx.rsp_prod_pvt;
- struct xen_netif_tx_response *resp;
-
- resp = RING_GET_RESPONSE(&queue->tx, i);
- resp->id = txp->id;
- resp->status = st;
-
- while (extra_count-- != 0)
- RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
+ unsigned long flags;
- queue->tx.rsp_prod_pvt = ++i;
-}
+ spin_lock_irqsave(&queue->response_lock, flags);
-static void push_tx_responses(struct xenvif_queue *queue)
-{
- int notify;
+ _make_tx_response(queue, txp, extra_count, status);
+ push_tx_responses(queue);
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
- if (notify)
- notify_remote_via_irq(queue->tx_irq);
+ spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
@@ -1750,5 +1778,6 @@ static void __exit netback_fini(void)
}
module_exit(netback_fini);
+MODULE_DESCRIPTION("Xen backend network device module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index a23ab5c968b9..a3455f1d67fa 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
index a5c0431c101c..6f7e7a8fa5ae 100644
--- a/drivers/nvme/common/keyring.c
+++ b/drivers/nvme/common/keyring.c
@@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void)
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("NVMe Keyring implementation");
module_init(nvme_keyring_init);
module_exit(nvme_keyring_exit);
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 596bb11eeba5..c727cd1f264b 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set,
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
bool dead = false, freeze = false;
unsigned long flags;
@@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
if (csts & NVME_CSTS_CFS)
dead = true;
- if (anv->ctrl.state == NVME_CTRL_LIVE ||
- anv->ctrl.state == NVME_CTRL_RESETTING) {
+ if (state == NVME_CTRL_LIVE ||
+ state == NVME_CTRL_RESETTING) {
freeze = true;
nvme_start_freeze(&anv->ctrl);
}
@@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
unsigned long flags;
u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
- if (anv->ctrl.state != NVME_CTRL_LIVE) {
+ if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
/*
* From rdma.c:
* If we are resetting, connecting or deleting we should
@@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work)
u32 boot_status, aqa;
struct apple_nvme *anv =
container_of(work, struct apple_nvme, ctrl.reset_work);
+ enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
- if (anv->ctrl.state != NVME_CTRL_RESETTING) {
- dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
- anv->ctrl.state);
+ if (state != NVME_CTRL_RESETTING) {
+ dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 72c0525c75f5..a264b3ae078b 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context {
static struct workqueue_struct *nvme_auth_wq;
-#define nvme_auth_flags_from_qid(qid) \
- (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
-#define nvme_auth_queue_from_qid(ctrl, qid) \
- (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
-
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
@@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
void *data, size_t data_len, bool auth_send)
{
struct nvme_command cmd = {};
- blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
- struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+ nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
+ struct request_queue *q = ctrl->fabrics_q;
int ret;
+ if (qid != 0) {
+ flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
+ q = ctrl->connect_q;
+ }
+
cmd.auth_common.opcode = nvme_fabrics_command;
cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
cmd.auth_common.spsp0 = 0x01;
@@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
}
ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
- qid == 0 ? NVME_QID_ANY : qid,
- 0, flags);
+ qid == 0 ? NVME_QID_ANY : qid, flags);
if (ret > 0)
dev_warn(ctrl->device,
"qid %d auth_send failed with status %d\n", qid, ret);
@@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
* If the ctrl is not connected, bail as reconnect will handle
* authentication.
*/
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
/* Authenticate admin queue first */
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 20f46c230885..6f2ebb5fcdb0 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = {
[NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
};
-const unsigned char *nvme_get_error_status_str(u16 status)
+const char *nvme_get_error_status_str(u16 status)
{
status &= 0x7ff;
if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
- return nvme_statuses[status & 0x7ff];
+ return nvme_statuses[status];
return "Unknown";
}
-const unsigned char *nvme_get_opcode_str(u8 opcode)
+const char *nvme_get_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
return nvme_ops[opcode];
@@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+const char *nvme_get_admin_opcode_str(u8 opcode)
{
if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
return nvme_admin_ops[opcode];
@@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
}
EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+const char *nvme_get_fabrics_opcode_str(u8 opcode) {
if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
return nvme_fabrics_ops[opcode];
return "Unknown";
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 85ab0fcf9e88..0a96362912ce 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -338,6 +338,30 @@ static void nvme_log_error(struct request *req)
nr->status & NVME_SC_DNR ? "DNR " : "");
}
+static void nvme_log_err_passthru(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ struct nvme_request *nr = nvme_req(req);
+
+ pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
+ "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
+ ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
+ ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
+ nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+ nr->cmd->common.opcode,
+ nvme_get_error_status_str(nr->status),
+ nr->status >> 8 & 7, /* Status Code Type */
+ nr->status & 0xff, /* Status Code */
+ nr->status & NVME_SC_MORE ? "MORE " : "",
+ nr->status & NVME_SC_DNR ? "DNR " : "",
+ nr->cmd->common.cdw10,
+ nr->cmd->common.cdw11,
+ nr->cmd->common.cdw12,
+ nr->cmd->common.cdw13,
+ nr->cmd->common.cdw14,
+ nr->cmd->common.cdw15);
+}
+
enum nvme_disposition {
COMPLETE,
RETRY,
@@ -385,8 +409,12 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
- nvme_log_error(req);
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@@ -679,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
- if (req->q->queuedata)
+ struct nvme_request *nr = nvme_req(req);
+ bool logging_enabled;
+
+ if (req->q->queuedata) {
+ struct nvme_ns *ns = req->q->disk->private_data;
+
+ logging_enabled = ns->head->passthru_err_log_enabled;
req->timeout = NVME_IO_TIMEOUT;
- else /* no queuedata implies admin queue */
+ } else { /* no queuedata implies admin queue */
+ logging_enabled = nr->ctrl->passthru_err_log_enabled;
req->timeout = NVME_ADMIN_TIMEOUT;
+ }
+
+ if (!logging_enabled)
+ req->rq_flags |= RQF_QUIET;
/* passthru commands should let the driver set the SGL flags */
cmd->common.flags &= ~NVME_CMD_SGL_ALL;
@@ -691,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
if (req->mq_hctx->type == HCTX_TYPE_POLL)
req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
- req->rq_flags |= RQF_QUIET;
- memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+ memcpy(nr->cmd, cmd, sizeof(*cmd));
}
EXPORT_SYMBOL_GPL(nvme_init_request);
@@ -721,7 +759,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live)
+ bool queue_live, enum nvme_ctrl_state state)
{
struct nvme_request *req = nvme_req(rq);
@@ -742,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
* command, which is required to set the queue live in the
* appropriate states.
*/
- switch (nvme_ctrl_state(ctrl)) {
+ switch (state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
(req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -1051,20 +1089,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head, blk_mq_req_flags_t flags)
+ int qid, nvme_submit_flags_t flags)
{
struct request *req;
int ret;
+ blk_mq_req_flags_t blk_flags = 0;
+ if (flags & NVME_SUBMIT_NOWAIT)
+ blk_flags |= BLK_MQ_REQ_NOWAIT;
+ if (flags & NVME_SUBMIT_RESERVED)
+ blk_flags |= BLK_MQ_REQ_RESERVED;
if (qid == NVME_QID_ANY)
- req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+ req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
else
- req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+ req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
qid - 1);
if (IS_ERR(req))
return PTR_ERR(req);
nvme_init_request(req, cmd);
+ if (flags & NVME_SUBMIT_RETRY)
+ req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1072,7 +1117,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- ret = nvme_execute_rq(req, at_head);
+ ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
if (result && ret >= 0)
*result = nvme_req(req)->result;
out:
@@ -1085,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1108,6 +1153,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
} else {
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+
+ /* Ignore execution restrictions if any relaxation bits are set */
+ if (effects & NVME_CMD_EFFECTS_CSER_MASK)
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
}
return effects;
@@ -1560,7 +1609,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, NVME_QID_ANY, 0, 0);
+ buffer, buflen, NVME_QID_ANY, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2172,7 +2221,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- NVME_QID_ANY, 1, 0);
+ NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
}
static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
@@ -3714,6 +3763,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
nvme_mpath_add_disk(ns, info->anagrpid);
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
+ /*
+ * Set ns->disk->device->driver_data to ns so we can access
+ * ns->head->passthru_err_log_enabled in
+ * nvme_io_passthru_err_log_enabled_[store | show]().
+ */
+ dev_set_drvdata(disk_to_dev(ns->disk), ns);
+
return;
out_cleanup_ns_from_list:
@@ -4138,6 +4194,7 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
struct nvme_fw_slot_info_log *log;
+ u8 next_fw_slot, cur_fw_slot;
log = kmalloc(sizeof(*log), GFP_KERNEL);
if (!log)
@@ -4149,13 +4206,15 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
goto out_free_log;
}
- if (log->afi & 0x70 || !(log->afi & 0x7)) {
+ cur_fw_slot = log->afi & 0x7;
+ next_fw_slot = (log->afi & 0x70) >> 4;
+ if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
dev_info(ctrl->device,
"Firmware is activated after next Controller Level Reset\n");
goto out_free_log;
}
- memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1],
+ memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
sizeof(ctrl->subsys->firmware_rev));
out_free_log:
@@ -4514,6 +4573,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
int ret;
WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+ ctrl->passthru_err_log_enabled = false;
clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
@@ -4851,5 +4911,6 @@ static void __exit nvme_core_exit(void)
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host core framework");
module_init(nvme_core_init);
module_exit(nvme_core_exit);
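One hunk in core.c above benefits from a worked example: the Firmware Slot Information log packs the Active Firmware Info (AFI) byte with the currently active slot in bits 0:2 and the slot that becomes active after the next reset in bits 4:6, and nvme_get_fw_slot_info() now decodes the two fields by name instead of testing the raw byte. A standalone sketch of the decoding (the AFI value is invented):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint8_t afi = 0x21;	/* example: next slot 2, current slot 1 */
    	uint8_t cur_fw_slot = afi & 0x7;		/* bits 0:2 */
    	uint8_t next_fw_slot = (afi & 0x70) >> 4;	/* bits 4:6 */

    	/* Same condition as the hunk above: skip the revision copy
    	 * when no slot is active or a different slot is pending.
    	 */
    	if (!cur_fw_slot || (next_fw_slot && cur_fw_slot != next_fw_slot))
    		printf("firmware activates after the next reset\n");
    	else
    		printf("running firmware is in slot %u\n", cur_fw_slot);
    	return 0;
    }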
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index b5752a77ad98..495c171daead 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
- data, sizeof(*data), NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), NVME_QID_ANY,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_NOWAIT |
+ NVME_SUBMIT_RESERVED);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -525,11 +527,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
return -ENOMEM;
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
- data, sizeof(*data), qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ data, sizeof(*data), qid,
+ NVME_SUBMIT_AT_HEAD |
+ NVME_SUBMIT_RESERVED |
+ NVME_SUBMIT_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
+ goto out_free_data;
}
result = le32_to_cpu(res.u32);
if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
@@ -1488,6 +1493,7 @@ static void __exit nvmf_exit(void)
}
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVMe host fabrics library");
module_init(nvmf_init);
module_exit(nvmf_exit);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index fbaee5a7be19..06cc54851b1b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -185,9 +185,11 @@ static inline bool
nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
- if (ctrl->state == NVME_CTRL_DELETING ||
- ctrl->state == NVME_CTRL_DELETING_NOIO ||
- ctrl->state == NVME_CTRL_DEAD ||
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (state == NVME_CTRL_DELETING ||
+ state == NVME_CTRL_DELETING_NOIO ||
+ state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
!uuid_equal(&opts->host->id, &ctrl->opts->host->id))
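The fabrics.h hunk above repeats a pattern used throughout this series (apple.c and auth.c earlier, multipath.c and nvme.h later): read the controller state once into a local via nvme_ctrl_state() rather than dereferencing ctrl->state at every comparison, since the state may change concurrently and a single snapshot keeps a multi-way test self-consistent. The shape of the pattern as a sketch; judging from these hunks, nvme_ctrl_state() acts as a READ_ONCE-style accessor:

    /* Sketch: one snapshot, many comparisons. */
    static bool ctrl_is_going_away(struct nvme_ctrl *ctrl)
    {
    	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

    	return state == NVME_CTRL_DELETING ||
    	       state == NVME_CTRL_DELETING_NOIO ||
    	       state == NVME_CTRL_DEAD;
    }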
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 16847a316421..68a5d971657b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-static struct workqueue_struct *nvme_fc_wq;
-
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
-
/*
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
- if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
- complete(&nvme_fc_unload_proceed);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -2574,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
+ u16 qnum = op->queue->qnum;
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
@@ -2582,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
* will detect the aborted io and will fail the connection.
*/
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+ "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
"x%08x/x%08x\n",
- ctrl->cnum, op->queue->qnum, sqe->common.opcode,
- sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+ ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
+ nvme_fabrics_opcode_str(qnum, sqe),
+ sqe->common.cdw10, sqe->common.cdw11);
if (__nvme_fc_abort_op(ctrl, op))
nvme_fc_error_recovery(ctrl, "io timeout abort failed");
@@ -3575,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
flush_delayed_work(&ctrl->connect_work);
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+ "NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
return &ctrl->ctrl;
@@ -3894,10 +3889,6 @@ static int __init nvme_fc_init_module(void)
{
int ret;
- nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
- if (!nvme_fc_wq)
- return -ENOMEM;
-
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3915,7 +3906,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- goto out_destroy_wq;
+ return ret;
}
/*
@@ -3939,8 +3930,6 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
-out_destroy_wq:
- destroy_workqueue(nvme_fc_wq);
return ret;
}
@@ -3960,48 +3949,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
spin_unlock(&rport->lock);
}
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
{
struct nvme_fc_lport *lport;
struct nvme_fc_rport *rport;
-
- list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
- list_for_each_entry(rport, &lport->endp_list, endp_list) {
- nvme_fc_delete_controllers(rport);
- }
- }
-}
-
-static void __exit nvme_fc_exit_module(void)
-{
unsigned long flags;
- bool need_cleanup = false;
spin_lock_irqsave(&nvme_fc_lock, flags);
- nvme_fc_waiting_to_unload = true;
- if (!list_empty(&nvme_fc_lport_list)) {
- need_cleanup = true;
- nvme_fc_cleanup_for_unload();
- }
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+ list_for_each_entry(rport, &lport->endp_list, endp_list)
+ nvme_fc_delete_controllers(rport);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- if (need_cleanup) {
- pr_info("%s: waiting for ctlr deletes\n", __func__);
- wait_for_completion(&nvme_fc_unload_proceed);
- pr_info("%s: ctrl deletes complete\n", __func__);
- }
+ flush_workqueue(nvme_delete_wq);
nvmf_unregister_transport(&nvme_fc_transport);
- ida_destroy(&nvme_fc_local_port_cnt);
- ida_destroy(&nvme_fc_ctrl_cnt);
-
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
- destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);
+MODULE_DESCRIPTION("NVMe host FC transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 18f5c1be5d67..3dfd5ae99ae0 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -228,7 +228,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
length = (io.nblocks + 1) << ns->head->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
- ns->head->ms == sizeof(struct t10_pi_tuple)) {
+ (ns->head->ms == ns->head->pi_size)) {
/*
* Protection information is stripped/inserted by the
* controller.
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2dd4137a08b2..74de1e64aeea 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
if (!ns->head->disk)
continue;
kblockd_schedule_work(&ns->head->requeue_work);
- if (ctrl->state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
@@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
+
/*
* We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
* still be able to complete assuming that the controller is connected.
* Otherwise it will fail immediately and return to the requeue list.
*/
- if (ns->ctrl->state != NVME_CTRL_LIVE &&
- ns->ctrl->state != NVME_CTRL_DELETING)
+ if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
return true;
if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
!test_bit(NVME_NS_READY, &ns->flags))
@@ -331,7 +332,7 @@ out:
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
- return ns->ctrl->state == NVME_CTRL_LIVE &&
+ return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
ns->ana_state == NVME_ANA_OPTIMIZED;
}
@@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
continue;
- switch (ns->ctrl->state) {
+ switch (nvme_ctrl_state(ns->ctrl)) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
@@ -667,7 +668,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
* controller is ready.
*/
if (nvme_state_is_live(ns->ana_state) &&
- ns->ctrl->state == NVME_CTRL_LIVE)
+ nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
}
@@ -748,7 +749,7 @@ static void nvme_ana_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
- if (ctrl->state != NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
return;
nvme_read_ana_log(ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 030c80818240..7b87763e2f8a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -263,6 +263,7 @@ enum nvme_ctrl_flags {
struct nvme_ctrl {
bool comp_seen;
bool identified;
+ bool passthru_err_log_enabled;
enum nvme_ctrl_state state;
spinlock_t lock;
struct mutex scan_lock;
@@ -454,6 +455,7 @@ struct nvme_ns_head {
struct list_head entry;
struct kref ref;
bool shared;
+ bool passthru_err_log_enabled;
int instance;
struct nvme_effects_log *effects;
u64 nuse;
@@ -522,7 +524,6 @@ struct nvme_ns {
struct device cdev_device;
struct nvme_fault_inject fault_inject;
-
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
@@ -805,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
- bool queue_live);
+ bool queue_live, enum nvme_ctrl_state state);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
- if (likely(ctrl->state == NVME_CTRL_LIVE))
+ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ if (likely(state == NVME_CTRL_LIVE))
return true;
- if (ctrl->ops->flags & NVME_F_FABRICS &&
- ctrl->state == NVME_CTRL_DELETING)
+ if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
return queue_live;
- return __nvme_check_ready(ctrl, rq, queue_live);
+ return __nvme_check_ready(ctrl, rq, queue_live, state);
}
/*
@@ -836,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}
+/*
+ * Flags for __nvme_submit_sync_cmd()
+ */
+typedef __u32 __bitwise nvme_submit_flags_t;
+
+enum {
+ /* Insert request at the head of the queue */
+ NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
+ /* Set BLK_MQ_REQ_NOWAIT when allocating request */
+ NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
+ /* Set BLK_MQ_REQ_RESERVED when allocating request */
+ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
+ /* Retry command when NVME_SC_DNR is not set in the result */
+ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
+};
+
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- int qid, int at_head,
- blk_mq_req_flags_t flags);
+ int qid, nvme_submit_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
@@ -1124,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
}
#ifdef CONFIG_NVME_VERBOSE_ERRORS
-const unsigned char *nvme_get_error_status_str(u16 status);
-const unsigned char *nvme_get_opcode_str(u8 opcode);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_get_error_status_str(u16 status)
+static inline const char *nvme_get_error_status_str(u16 status)
{
return "I/O Error";
}
-static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+static inline const char *nvme_get_opcode_str(u8 opcode)
{
return "I/O Cmd";
}
-static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
-static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
{
return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
{
- if (opcode == nvme_fabrics_command)
- return nvme_get_fabrics_opcode_str(fctype);
return qid ? nvme_get_opcode_str(opcode) :
nvme_get_admin_opcode_str(opcode);
}
+
+static inline const char *nvme_fabrics_opcode_str(
+ int qid, const struct nvme_command *cmd)
+{
+ if (nvme_is_fabrics(cmd))
+ return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+ return nvme_opcode_str(qid, cmd->common.opcode);
+}
#endif /* _NVME_H */
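The nvme_submit_flags_t block above replaces the old (qid, at_head, blk_mq_req_flags_t) argument triple with one sparse-checked (__bitwise) bitmask; __nvme_submit_sync_cmd() translates the NVME_SUBMIT_NOWAIT and NVME_SUBMIT_RESERVED bits back into BLK_MQ_REQ_* flags internally, as the core.c hunk earlier shows. The conversion for a caller, taken from the fabrics.c hunks above:

    /* Before: at_head and block-layer flags passed separately. */
    ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
    			     data, sizeof(*data), NVME_QID_ANY, 1,
    			     BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);

    /* After: a single nvme_submit_flags_t carries the same intent. */
    ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
    			     data, sizeof(*data), NVME_QID_ANY,
    			     NVME_SUBMIT_AT_HEAD |
    			     NVME_SUBMIT_NOWAIT |
    			     NVME_SUBMIT_RESERVED);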
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c1d6357ec98a..e6267a6aa380 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1349,7 +1349,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
dev_warn(dev->ctrl.device,
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
req->tag, nvme_cid(req), opcode,
- nvme_opcode_str(nvmeq->qid, opcode, 0), nvmeq->qid);
+ nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
goto disable;
}
@@ -3543,5 +3543,6 @@ static void __exit nvme_exit(void)
MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host PCIe transport driver");
module_init(nvme_init);
module_exit(nvme_exit);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 11dde0d83044..20fdd40b1879 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1410,6 +1410,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
struct nvme_ns *ns = rq->q->queuedata;
struct bio *bio = rq->bio;
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ u32 xfer_len;
int nr;
req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
@@ -1422,8 +1424,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
if (unlikely(nr))
goto mr_put;
- nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
- req->mr->sig_attrs, ns->head->pi_type);
+ nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
@@ -1441,7 +1442,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
IB_ACCESS_REMOTE_WRITE;
sg->addr = cpu_to_le64(req->mr->iova);
- put_unaligned_le24(req->mr->length, sg->length);
+ xfer_len = req->mr->length;
+ /* Check if PI is added by the HW */
+ if (!pi_count)
+ xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
+ put_unaligned_le24(xfer_len, sg->length);
put_unaligned_le32(req->mr->rkey, sg->key);
sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
@@ -1946,14 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_queue *queue = req->queue;
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- u8 opcode = req->req.cmd->common.opcode;
- u8 fctype = req->req.cmd->fabrics.fctype;
+ struct nvme_command *cmd = req->req.cmd;
int qid = nvme_rdma_queue_idx(queue);
dev_warn(ctrl->ctrl.device,
"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
- rq->tag, nvme_cid(rq), opcode,
- nvme_opcode_str(qid, opcode, fctype), qid);
+ rq->tag, nvme_cid(rq), cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
/*
@@ -2296,8 +2300,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2400,4 +2404,5 @@ static void __exit nvme_rdma_cleanup_module(void)
module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);
+MODULE_DESCRIPTION("NVMe host RDMA transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 754e91111042..f2832f70e7e0 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -35,6 +35,31 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf,
+ ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+
+ ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
{
struct gendisk *disk = dev_to_disk(dev);
@@ -44,6 +69,37 @@ static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
return nvme_get_ns_from_dev(dev)->head;
}
+static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+
+ return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+
+static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ bool passthru_err_log_enabled;
+ int err;
+
+ err = kstrtobool(buf, &passthru_err_log_enabled);
+ if (err)
+ return -EINVAL;
+ head->passthru_err_log_enabled = passthru_err_log_enabled;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+ nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
+
+static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
+ __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+ nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
+
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -208,6 +264,7 @@ static struct attribute *nvme_ns_attrs[] = {
&dev_attr_ana_grpid.attr,
&dev_attr_ana_state.attr,
#endif
+ &dev_attr_io_passthru_err_log_enabled.attr,
NULL,
};
@@ -311,6 +368,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned state = (unsigned)nvme_ctrl_state(ctrl);
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
@@ -321,9 +379,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_DEAD] = "dead",
};
- if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
- state_name[ctrl->state])
- return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+ if (state < ARRAY_SIZE(state_name) && state_name[state])
+ return sysfs_emit(buf, "%s\n", state_name[state]);
return sysfs_emit(buf, "unknown state\n");
}
@@ -655,6 +712,7 @@ static struct attribute *nvme_dev_attrs[] = {
#ifdef CONFIG_NVME_TCP_TLS
&dev_attr_tls_key.attr,
#endif
+ &dev_attr_adm_passthru_err_log_enabled.attr,
NULL
};
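Both new attributes surface in sysfs as files named passthru_err_log_enabled, one under the controller device and one under each namespace block device, and kstrtobool() accepts the usual on/off, y/n, 1/0 spellings. A minimal user-space sketch; the nvme0 path below is illustrative and depends on how the devices enumerate:

    #include <stdio.h>

    int main(void)
    {
    	/* Hypothetical path; substitute your controller's name. */
    	FILE *f = fopen("/sys/class/nvme/nvme0/passthru_err_log_enabled",
    			"w");

    	if (!f)
    		return 1;
    	fputs("on\n", f);	/* kstrtobool() also accepts "1" or "y" */
    	fclose(f);
    	return 0;
    }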
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d058d990532b..a6d596e05602 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2428,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
- u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+ struct nvme_command *cmd = &pdu->cmd;
int qid = nvme_tcp_queue_id(req->queue);
dev_warn(ctrl->device,
"I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
- rq->tag, nvme_cid(rq), pdu->hdr.type, opc,
- nvme_opcode_str(qid, opc, fctype), qid);
+ rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
+ nvme_fabrics_opcode_str(qid, cmd), qid);
if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
/*
@@ -2753,8 +2753,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
if (ret)
goto out_uninit_ctrl;
- dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
@@ -2826,4 +2826,5 @@ static void __exit nvme_tcp_cleanup_module(void)
module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);
+MODULE_DESCRIPTION("NVMe host TCP transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index d26aa30f8702..8658e9c08534 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_NS_CHANGED,
NVME_LOG_CHANGED_NS);
}
@@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
continue;
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
continue;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
}
mutex_unlock(&subsys->lock);
@@ -1705,4 +1705,5 @@ static void __exit nvmet_exit(void)
module_init(nvmet_init);
module_exit(nvmet_exit);
+MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 668d257fa986..68e82ccc0e4e 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port,
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
return;
- nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d8da840a1c0e..9964ffe347d2 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -209,7 +209,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status = 0;
+ u16 status;
int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
@@ -290,7 +290,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl;
u16 qid = le16_to_cpu(c->qid);
- u16 status = 0;
+ u16 status;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index bda7a3009e85..fd229f310c93 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -111,6 +111,8 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
+
+ struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -145,7 +147,6 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
- struct rcu_head rcu;
/* array of fcp_iods */
struct nvmet_fc_fcp_iod fod[] __counted_by(sqsize);
} __aligned(sizeof(unsigned long long));
@@ -166,10 +167,9 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
- struct rcu_head rcu;
};
@@ -249,6 +249,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+{
+ struct nvmet_fc_tgtport *tgtport =
+ container_of(work, struct nvmet_fc_tgtport, put_work);
+
+ nvmet_fc_tgtport_put(tgtport);
+}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
@@ -360,7 +367,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
if (!lsop->req_queued) {
spin_unlock_irqrestore(&tgtport->lock, flags);
- return;
+ goto out_putwork;
}
list_del(&lsop->lsreq_list);
@@ -373,7 +380,8 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
(lsreq->rqstlen + lsreq->rsplen),
DMA_BIDIRECTIONAL);
- nvmet_fc_tgtport_put(tgtport);
+out_putwork:
+ queue_work(nvmet_wq, &tgtport->put_work);
}
static int
@@ -489,8 +497,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
* message is normal. Otherwise, send unless the hostport has
* already been invalidated by the lldd.
*/
- if (!tgtport->ops->ls_req || !assoc->hostport ||
- assoc->hostport->invalid)
+ if (!tgtport->ops->ls_req || assoc->hostport->invalid)
return;
lsop = kzalloc((sizeof(*lsop) +
@@ -802,14 +809,11 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue)
return NULL;
- if (!nvmet_fc_tgt_a_get(assoc))
- goto out_free_queue;
-
queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
assoc->tgtport->fc_target_port.port_num,
assoc->a_id, qid);
if (!queue->work_q)
- goto out_a_put;
+ goto out_free_queue;
queue->qid = qid;
queue->sqsize = sqsize;
@@ -831,15 +835,13 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- rcu_assign_pointer(assoc->queues[qid], queue);
+ assoc->queues[qid] = queue;
return queue;
out_fail_iodlist:
nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
destroy_workqueue(queue->work_q);
-out_a_put:
- nvmet_fc_tgt_a_put(assoc);
out_free_queue:
kfree(queue);
return NULL;
@@ -852,15 +854,11 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
-
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
- nvmet_fc_tgt_a_put(queue->assoc);
-
destroy_workqueue(queue->work_q);
- kfree_rcu(queue, rcu);
+ kfree(queue);
}
static void
@@ -969,7 +967,7 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = rcu_dereference(assoc->queues[qid]);
+ queue = assoc->queues[qid];
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
@@ -1078,8 +1076,6 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
/* new allocation not needed */
kfree(newhost);
newhost = match;
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
} else {
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
@@ -1094,23 +1090,54 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
{
struct nvmet_fc_tgt_assoc *assoc =
container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+ struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_delete_assoc(assoc);
+ nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+ nvmet_fc_tgtport_get(assoc->tgtport);
+ queue_work(nvmet_wq, &assoc->del_work);
+}
+
+static bool
+nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
+{
+ struct nvmet_fc_tgt_assoc *a;
+
+ list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) {
+ if (association_id == a->association_id)
+ return true;
+ }
+
+ return false;
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
- struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+ struct nvmet_fc_tgt_assoc *assoc;
unsigned long flags;
+ bool done;
u64 ran;
int idx;
- bool needrandom = true;
+
+ if (!tgtport->pe)
+ return NULL;
assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
if (!assoc)
@@ -1120,43 +1147,35 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
if (idx < 0)
goto out_free_assoc;
- if (!nvmet_fc_tgtport_get(tgtport))
- goto out_ida;
-
assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
if (IS_ERR(assoc->hostport))
- goto out_put;
+ goto out_ida;
assoc->tgtport = tgtport;
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
- INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
atomic_set(&assoc->terminating, 0);
- while (needrandom) {
+ done = false;
+ do {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
ran = ran << BYTES_FOR_QID_SHIFT;
spin_lock_irqsave(&tgtport->lock, flags);
- needrandom = false;
- list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
- if (ran == tmpassoc->association_id) {
- needrandom = true;
- break;
- }
- }
- if (!needrandom) {
+ rcu_read_lock();
+ if (!nvmet_fc_assoc_exists(tgtport, ran)) {
assoc->association_id = ran;
list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
+ done = true;
}
+ rcu_read_unlock();
spin_unlock_irqrestore(&tgtport->lock, flags);
- }
+ } while (!done);
return assoc;
-out_put:
- nvmet_fc_tgtport_put(tgtport);
out_ida:
ida_free(&tgtport->assoc_cnt, idx);
out_free_assoc:
@@ -1172,13 +1191,18 @@ nvmet_fc_target_assoc_free(struct kref *ref)
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_ls_iod *oldls;
unsigned long flags;
+ int i;
+
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ nvmet_fc_delete_target_queue(assoc->queues[i]);
+ }
/* Send Disconnect now that all i/o has completed */
nvmet_fc_xmt_disconnect_assoc(assoc);
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1188,8 +1212,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
- kfree_rcu(assoc, rcu);
- nvmet_fc_tgtport_put(tgtport);
+ kfree(assoc);
}
static void
@@ -1208,7 +1231,7 @@ static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
- struct nvmet_fc_tgt_queue *queue;
+ unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1217,29 +1240,21 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
+ spin_lock_irqsave(&tgtport->lock, flags);
+ list_del_rcu(&assoc->a_list);
+ spin_unlock_irqrestore(&tgtport->lock, flags);
- for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- rcu_read_lock();
- queue = rcu_dereference(assoc->queues[i]);
- if (!queue) {
- rcu_read_unlock();
- continue;
- }
+ synchronize_rcu();
- if (!nvmet_fc_tgt_q_get(queue)) {
- rcu_read_unlock();
- continue;
- }
- rcu_read_unlock();
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
+ /* ensure all in-flight I/Os have been processed */
+ for (i = NVMET_NR_QUEUES; i >= 0; i--) {
+ if (assoc->queues[i])
+ flush_workqueue(assoc->queues[i]->work_q);
}
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
tgtport->fc_target_port.port_num, assoc->a_id);
-
- nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
@@ -1415,6 +1430,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
+ INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -1492,9 +1508,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
rcu_read_unlock();
}
@@ -1540,16 +1555,14 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
spin_lock_irqsave(&tgtport->lock, flags);
list_for_each_entry_safe(assoc, next,
&tgtport->assoc_list, a_list) {
- if (!assoc->hostport ||
- assoc->hostport->hosthandle != hosthandle)
+ if (assoc->hostport->hosthandle != hosthandle)
continue;
if (!nvmet_fc_tgt_a_get(assoc))
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1581,7 +1594,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
rcu_read_lock();
list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
- queue = rcu_dereference(assoc->queues[0]);
+ queue = assoc->queues[0];
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
@@ -1593,9 +1606,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- if (!queue_work(nvmet_wq, &assoc->del_work))
- /* already deleting - release local reference */
- nvmet_fc_tgt_a_put(assoc);
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
return;
}
@@ -1625,6 +1637,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
+ flush_workqueue(nvmet_wq);
+
/*
* should terminate LS's as well. However, LS's will be generated
* at the tail end of association termination, so they likely don't
@@ -1870,9 +1884,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
- /* release get taken in nvmet_fc_find_target_assoc */
- nvmet_fc_tgt_a_put(assoc);
-
/*
* The rules for LS response says the response cannot
* go back until ABTS's have been sent for all outstanding
@@ -1887,8 +1898,6 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc->rcv_disconn = iod;
spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_assoc(assoc);
-
if (oldls) {
dev_info(tgtport->dev,
"{%d:%d} Multiple Disconnect Association LS's "
@@ -1904,6 +1913,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
nvmet_fc_xmt_ls_rsp(tgtport, oldls);
}
+ nvmet_fc_schedule_delete_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+
return false;
}
@@ -2540,8 +2552,9 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.cqe = &fod->rspiubuf.cqe;
- if (tgtport->pe)
- fod->req.port = tgtport->pe->port;
+ if (!tgtport->pe)
+ goto transport_error;
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2902,6 +2915,9 @@ nvmet_fc_remove_port(struct nvmet_port *port)
nvmet_fc_portentry_unbind(pe);
+ /* terminate any outstanding associations */
+ __nvmet_fc_free_assocs(pe->tgtport);
+
kfree(pe);
}
@@ -2933,6 +2949,9 @@ static int __init nvmet_fc_init_module(void)
static void __exit nvmet_fc_exit_module(void)
{
+ /* ensure any shutdown operations, e.g. delete ctrls, have finished */
+ flush_workqueue(nvmet_wq);
+
/* sanity check - all lports should be removed */
if (!list_empty(&nvmet_fc_target_list))
pr_warn("%s: targetport list not empty\n", __func__);
@@ -2945,4 +2964,5 @@ static void __exit nvmet_fc_exit_module(void)
module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);
+MODULE_DESCRIPTION("NVMe target FC transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index ead349af30f1..1471af250ea6 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
return ret;
@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
if (remoteport) {
rport = remoteport->private;
spin_lock(&rport->lock);
- list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &rport->ls_list);
spin_unlock(&rport->lock);
queue_work(nvmet_wq, &rport->ls_work);
}
@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
if (!tport->remoteport) {
tls_req->status = -ECONNREFUSED;
spin_lock(&tport->lock);
- list_add_tail(&tport->ls_list, &tls_req->ls_list);
+ list_add_tail(&tls_req->ls_list, &tport->ls_list);
spin_unlock(&tport->lock);
queue_work(nvmet_wq, &tport->ls_work);
return ret;
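
All three fcloop hunks fix the same inverted call: list_add_tail() takes the new entry first and the list head second, so swapping them splices the head itself onto the entry. A short sketch of the corrected usage (names are illustrative, not fcloop's actual structures):

#include <linux/list.h>

struct ls_req {
        struct list_head ls_list;       /* entry to be linked onto a port list */
        int status;
};

static LIST_HEAD(port_ls_list);         /* list head owned by the port */

static void queue_ls_req(struct ls_req *req)
{
        /* Correct argument order: new entry first, list head second. */
        list_add_tail(&req->ls_list, &port_ls_list);
}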
@@ -1650,4 +1650,5 @@ static void __exit fcloop_exit(void)
module_init(fcloop_init);
module_exit(fcloop_exit);
+MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9cb434c58075..e589915ddef8 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
}
nvme_quiesce_admin_queue(&ctrl->ctrl);
- if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
nvme_disable_ctrl(&ctrl->ctrl, true);
nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
nvme_loop_shutdown_ctrl(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
- if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
- ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
+ if (state != NVME_CTRL_DELETING &&
+ state != NVME_CTRL_DELETING_NOIO)
/* state change failure for non-deleted ctrl? */
WARN_ON_ONCE(1);
return;
@@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void)
module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);
+MODULE_DESCRIPTION("NVMe target loop transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 667f9c04f35d..3a0f2c170f4c 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -2104,5 +2104,6 @@ static void __exit nvmet_rdma_exit(void)
module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);
+MODULE_DESCRIPTION("NVMe target RDMA transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 6a1e6bb80062..c8655fc5aa5b 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -2216,10 +2216,12 @@ static void __exit nvmet_tcp_exit(void)
flush_workqueue(nvmet_wq);
destroy_workqueue(nvmet_tcp_wq);
+ ida_destroy(&nvmet_tcp_queue_ida);
}
module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);
+MODULE_DESCRIPTION("NVMe target TCP transport driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 980123fb4dde..eb357ac2e54a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -460,8 +460,9 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
list_for_each_entry(entry, &nvmem->cells, node) {
sysfs_bin_attr_init(&attrs[i]);
attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
- "%s@%x", entry->name,
- entry->offset);
+ "%s@%x,%x", entry->name,
+ entry->offset,
+ entry->bit_offset);
attrs[i].attr.mode = 0444;
attrs[i].size = entry->bytes;
attrs[i].read = &nvmem_cell_attr_read;
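
Appending the bit offset keeps two cells that share a byte offset from colliding on the same sysfs attribute name. Roughly, with assumed example values:

#include <linux/device.h>

/* e.g. ("mac-address", 0x10, 4) -> "mac-address@10,4" */
static char *cell_attr_name(struct device *dev, const char *cell,
                            unsigned int offset, unsigned int bit_offset)
{
        return devm_kasprintf(dev, GFP_KERNEL, "%s@%x,%x",
                              cell, offset, bit_offset);
}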
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 641a40cf5cf3..fa8cd33be131 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -763,7 +763,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node)
/* Walk 3 levels up only if there is 'ports' node. */
for (depth = 3; depth && node; depth--) {
node = of_get_next_parent(node);
- if (depth == 2 && !of_node_name_eq(node, "ports"))
+ if (depth == 2 && !of_node_name_eq(node, "ports") &&
+ !of_node_name_eq(node, "in-ports") &&
+ !of_node_name_eq(node, "out-ports"))
break;
}
return node;
@@ -1063,36 +1065,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
return of_device_get_match_data(dev);
}
-static struct device_node *of_get_compat_node(struct device_node *np)
-{
- of_node_get(np);
-
- while (np) {
- if (!of_device_is_available(np)) {
- of_node_put(np);
- np = NULL;
- }
-
- if (of_property_present(np, "compatible"))
- break;
-
- np = of_get_next_parent(np);
- }
-
- return np;
-}
-
-static struct device_node *of_get_compat_node_parent(struct device_node *np)
-{
- struct device_node *parent, *node;
-
- parent = of_get_parent(np);
- node = of_get_compat_node(parent);
- of_node_put(parent);
-
- return node;
-}
-
static void of_link_to_phandle(struct device_node *con_np,
struct device_node *sup_np)
{
@@ -1222,10 +1194,10 @@ static struct device_node *parse_##fname(struct device_node *np, \
* parse_prop.prop_name: Name of property holding a phandle value
* parse_prop.index: For properties holding a list of phandles, this is the
* index into the list
+ * @get_con_dev: If the consumer node containing the property is never converted
+ * to a struct device, implement this op so fw_devlink can use it
+ * to find the true consumer.
* @optional: Describes whether a supplier is mandatory or not
- * @node_not_dev: The consumer node containing the property is never converted
- * to a struct device. Instead, parse ancestor nodes for the
- * compatible property to find a node corresponding to a device.
*
* Returns:
* parse_prop() return values are
@@ -1236,15 +1208,15 @@ static struct device_node *parse_##fname(struct device_node *np, \
struct supplier_bindings {
struct device_node *(*parse_prop)(struct device_node *np,
const char *prop_name, int index);
+ struct device_node *(*get_con_dev)(struct device_node *np);
bool optional;
- bool node_not_dev;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
-DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
@@ -1262,7 +1234,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
-DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
@@ -1328,6 +1299,17 @@ static struct device_node *parse_interrupts(struct device_node *np,
return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
}
+static struct device_node *parse_remote_endpoint(struct device_node *np,
+ const char *prop_name,
+ int index)
+{
+ /* Return NULL for index > 0 to signify end of remote-endpoints. */
+ if (index > 0 || strcmp(prop_name, "remote-endpoint"))
+ return NULL;
+
+ return of_graph_get_remote_port_parent(np);
+}
+
static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_clocks, },
{ .parse_prop = parse_interconnects, },
@@ -1352,7 +1334,10 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_pinctrl6, },
{ .parse_prop = parse_pinctrl7, },
{ .parse_prop = parse_pinctrl8, },
- { .parse_prop = parse_remote_endpoint, .node_not_dev = true, },
+ {
+ .parse_prop = parse_remote_endpoint,
+ .get_con_dev = of_graph_get_port_parent,
+ },
{ .parse_prop = parse_pwms, },
{ .parse_prop = parse_resets, },
{ .parse_prop = parse_leds, },
@@ -1403,8 +1388,8 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
while ((phandle = s->parse_prop(con_np, prop_name, i))) {
struct device_node *con_dev_np;
- con_dev_np = s->node_not_dev
- ? of_get_compat_node_parent(con_np)
+ con_dev_np = s->get_con_dev
+ ? s->get_con_dev(con_np)
: of_node_get(con_np);
matched = true;
i++;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index cfd60e35a899..d7593bde2d02 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -50,6 +50,12 @@ static struct unittest_results {
failed; \
})
+#ifdef CONFIG_OF_KOBJ
+#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref)
+#else
+#define OF_KREF_READ(NODE) 1
+#endif
+
/*
* Expected message may have a message level other than KERN_INFO.
* Print the expected message only if the current loglevel will allow
@@ -570,7 +576,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
pr_err("missing testcase data\n");
return;
}
- prefs[i] = kref_read(&p[i]->kobj.kref);
+ prefs[i] = OF_KREF_READ(p[i]);
}
rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
@@ -693,9 +699,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
for (i = 0; i < ARRAY_SIZE(p); ++i) {
- unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
+ unittest(prefs[i] == OF_KREF_READ(p[i]),
"provider%d: expected:%d got:%d\n",
- i, prefs[i], kref_read(&p[i]->kobj.kref));
+ i, prefs[i], OF_KREF_READ(p[i]));
of_node_put(p[i]);
}
}
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 9c2137dae429..826b5016a101 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -386,21 +386,8 @@ void pci_bus_add_devices(const struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_add_devices);
-/** pci_walk_bus - walk devices on/under bus, calling callback.
- * @top bus whose devices should be walked
- * @cb callback to be called for each device found
- * @userdata arbitrary pointer to be passed to callback.
- *
- * Walk the given bus, including any bridged devices
- * on buses under this bus. Call the provided callback
- * on each device found.
- *
- * We check the return of @cb each time. If it returns anything
- * other than 0, we break out.
- *
- */
-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
- void *userdata)
+static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata, bool locked)
{
struct pci_dev *dev;
struct pci_bus *bus;
@@ -408,7 +395,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
int retval;
bus = top;
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
next = top->devices.next;
for (;;) {
if (next == &bus->devices) {
@@ -431,10 +419,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
if (retval)
break;
}
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
+}
+
+/**
+ * pci_walk_bus - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Walk the given bus, including any bridged devices
+ * on buses under this bus. Call the provided callback
+ * on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ */
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ __pci_walk_bus(top, cb, userdata, false);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ __pci_walk_bus(top, cb, userdata, true);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
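
A caller that already holds pci_bus_sem — for instance inside another bus-walk callback — would use the new helper along these lines (a hedged sketch built on the pci_walk_bus_locked() and pci_set_power_state_locked() interfaces added in this series; the callback itself is hypothetical):

#include <linux/pci.h>

static int set_d0_cb(struct pci_dev *pdev, void *userdata)
{
        /* Runs with pci_bus_sem already held by the outer walk. */
        pci_set_power_state_locked(pdev, PCI_D0);
        return 0;       /* a non-zero return would stop the walk */
}

static void power_up_subtree_locked(struct pci_bus *bus)
{
        /* Caller must hold pci_bus_sem; lockdep_assert_held() verifies it. */
        pci_walk_bus_locked(bus, set_d0_cb, NULL);
}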
+
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
if (bus)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 5befed2dc02b..9a437cfce073 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -6,6 +6,7 @@
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -482,9 +483,10 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep_func->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
- msg_addr = ((u64)msg_addr_upper) << 32 |
- (msg_addr_lower & ~aligned_offset);
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
@@ -551,7 +553,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- msg_addr &= ~aligned_offset;
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
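
ALIGN_DOWN() rounds an address down to a power-of-two boundary; unlike the old open-coded mask, it is applied to the full 64-bit address, so the upper half can no longer leak into the offset math. With assumed example values:

#include <linux/align.h>
#include <linux/types.h>

static void msi_addr_split(u64 msg_addr, u64 page_size,
                           u64 *mapped, u64 *offset)
{
        *offset = msg_addr & (page_size - 1);      /* offset within the page */
        *mapped = ALIGN_DOWN(msg_addr, page_size); /* page-aligned map target */
        /*
         * e.g. msg_addr = 0x123456789, page_size = 0x1000:
         *      *mapped = 0x123456000, *offset = 0x789
         */
}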
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 10f2d0bb86be..2ce2a3bd932b 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -972,7 +972,7 @@ static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
* Downstream devices need to be in D0 state before enabling PCI PM
* substates.
*/
- pci_set_power_state(pdev, PCI_D0);
+ pci_set_power_state_locked(pdev, PCI_D0);
pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index c8be056c248d..cfd84a899c82 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
return (irq_hw_number_t)desc->msi_index |
pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+ ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
}
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
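
Without the cast, the shift happens in 32-bit arithmetic (pci_domain_nr() returns int), so any domain bit at position 5 or above is shifted out before the result widens to irq_hw_number_t. A standalone demonstration with an assumed domain number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t domain = 0x40; /* assumed domain number with bit 6 set */

        uint64_t buggy = (uint64_t)(domain << 27); /* 32-bit shift wraps to 0 */
        uint64_t fixed = (uint64_t)domain << 27;   /* widened first: bit 33 kept */

        printf("buggy=%#llx fixed=%#llx\n",
               (unsigned long long)buggy, (unsigned long long)fixed);
        /* prints: buggy=0 fixed=0x200000000 */
        return 0;
}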
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d8f11a078924..c3585229c12a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1354,6 +1354,7 @@ end:
/**
* pci_set_full_power_state - Put a PCI device into D0 and update its state
* @dev: PCI device to power up
+ * @locked: whether pci_bus_sem is held
*
* Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
* to confirm the state change, restore its BARs if they might be lost and
@@ -1363,7 +1364,7 @@ end:
* to D0, it is more efficient to use pci_power_up() directly instead of this
* function.
*/
-static int pci_set_full_power_state(struct pci_dev *dev)
+static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
u16 pmcsr;
int ret;
@@ -1399,7 +1400,7 @@ static int pci_set_full_power_state(struct pci_dev *dev)
}
if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
return 0;
}
@@ -1428,10 +1429,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
+{
+ if (!bus)
+ return;
+
+ if (locked)
+ pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
+ else
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+}
+
/**
* pci_set_low_power_state - Put a PCI device into a low-power state.
* @dev: PCI device to handle.
* @state: PCI power state (D1, D2, D3hot) to put the device into.
+ * @locked: whether pci_bus_sem is held
*
* Use the device's PCI_PM_CTRL register to put it into a low-power state.
*
@@ -1442,7 +1455,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
u16 pmcsr;
@@ -1496,29 +1509,12 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(state));
if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
return 0;
}
-/**
- * pci_set_power_state - Set the power state of a PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * Transition a device to a new power state, using the platform firmware and/or
- * the device's PCI PM registers.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
- * 0 if device already is in the requested state.
- * 0 if the transition is to D3 but D3 is not supported.
- * 0 if device's power state has been successfully changed.
- */
-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
int error;
@@ -1542,7 +1538,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
if (state == PCI_D0)
- return pci_set_full_power_state(dev);
+ return pci_set_full_power_state(dev, locked);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1556,16 +1552,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
* To put the device in D3cold, put it into D3hot in the native
* way, then put it into D3cold using platform ops.
*/
- error = pci_set_low_power_state(dev, PCI_D3hot);
+ error = pci_set_low_power_state(dev, PCI_D3hot, locked);
if (pci_platform_power_transition(dev, PCI_D3cold))
return error;
/* Powering off a bridge may power off the whole hierarchy */
if (dev->current_state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
} else {
- error = pci_set_low_power_state(dev, state);
+ error = pci_set_low_power_state(dev, state, locked);
if (pci_platform_power_transition(dev, state))
return error;
@@ -1573,8 +1569,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
}
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+ * 0 if device already is in the requested state.
+ * 0 if the transition is to D3 but D3 is not supported.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ return __pci_set_power_state(dev, state, false);
+}
EXPORT_SYMBOL(pci_set_power_state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ return __pci_set_power_state(dev, state, true);
+}
+EXPORT_SYMBOL(pci_set_power_state_locked);
+
#define PCI_EXP_SAVE_REGS 7
static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
@@ -2496,29 +2522,36 @@ static void pci_pme_list_scan(struct work_struct *work)
if (pdev->pme_poll) {
struct pci_dev *bridge = pdev->bus->self;
struct device *dev = &pdev->dev;
- int pm_status;
+ struct device *bdev = bridge ? &bridge->dev : NULL;
+ int bref = 0;
/*
- * If bridge is in low power state, the
- * configuration space of subordinate devices
- * may be not accessible
+ * If we have a bridge, it should be in an active/D0
+ * state or the configuration space of subordinate
+ * devices may not be accessible or stable over the
+ * course of the call.
*/
- if (bridge && bridge->current_state != PCI_D0)
- continue;
+ if (bdev) {
+ bref = pm_runtime_get_if_active(bdev, true);
+ if (!bref)
+ continue;
+
+ if (bridge->current_state != PCI_D0)
+ goto put_bridge;
+ }
/*
- * If the device is in a low power state it
- * should not be polled either.
+ * The device itself should be suspended but config
+ * space must be accessible, therefore it cannot be in
+ * D3cold.
*/
- pm_status = pm_runtime_get_if_active(dev, true);
- if (!pm_status)
- continue;
-
- if (pdev->current_state != PCI_D3cold)
+ if (pm_runtime_suspended(dev) &&
+ pdev->current_state != PCI_D3cold)
pci_pme_wakeup(pdev, NULL);
- if (pm_status > 0)
- pm_runtime_put(dev);
+put_bridge:
+ if (bref > 0)
+ pm_runtime_put(bdev);
} else {
list_del(&pme_dev->list);
kfree(pme_dev);
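
The reshuffled loop only touches a subordinate device's config space while its bridge is pinned in the runtime-active state. A stripped-down sketch of that guard (the poll body is elided; pm_runtime_get_if_active() is used in the two-argument form this kernel provides):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void poll_one_device(struct pci_dev *pdev)
{
        struct pci_dev *bridge = pdev->bus->self;
        int bref = 0;

        if (bridge) {
                /* Pin the bridge only if it is already runtime-active. */
                bref = pm_runtime_get_if_active(&bridge->dev, true);
                if (!bref)
                        return;         /* bridge suspended: skip this pass */
                if (bridge->current_state != PCI_D0)
                        goto put_bridge;
        }

        /* ... config-space reads of pdev are safe here ... */

put_bridge:
        if (bref > 0)
                pm_runtime_put(&bridge->dev);
}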
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 2336a8d1edab..e9750b1b19ba 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -571,12 +571,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5a0066ecc3c5..bc0bd86695ec 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1003,8 +1003,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+/*
+ * @pdev: the root port or switch downstream port
+ * @locked: whether pci_bus_sem is held
+ */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
{
struct pcie_link_state *link = pdev->link_state;
@@ -1014,12 +1017,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
* Devices changed PM state, we should recheck if latency
* meets all functions' requirement
*/
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
pcie_update_aspm_capable(link->root);
pcie_config_aspm_path(link);
mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
}
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index c584165b13ba..7e3aa7e2345f 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -2305,6 +2305,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
continue;
}
+ /*
+ * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
+ * child count larger than the number of valid child pointers.
+ * A child offset of 0 can only occur on CMN-600; otherwise it
+ * would imply the root node being its own grandchild, which
+ * we can safely dismiss in general.
+ */
+ if (reg == 0 && cmn->part != PART_CMN600) {
+ dev_dbg(cmn->dev, "bogus child pointer?\n");
+ continue;
+ }
arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 365d964b0f6a..308c9969642e 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -59,7 +59,7 @@
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
-#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
+#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
@@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
}
/*
- * CPMU specification allows for 8 filters, each with a 16 bit value...
- * So we need to find 8x16bits to store it in.
- * As the value used for disable is 0xffff, a separate enable switch
+ * CPMU specification allows for 8 filters, each with a 32 bit value...
+ * So we need to find 8x32bits to store it in.
+ * As the value used for disable is 0xffff_ffff, a separate enable switch
* is needed.
*/
@@ -419,7 +419,7 @@ static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
- CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)),
/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
@@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
if (cxl_pmu_config1_hdm_filter_en(event))
cfg = cxl_pmu_config2_get_hdm_decoder(event);
else
- cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+ cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
}
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 0dda70e1ef90..c78a6fd6c57f 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!rvpmu->ctr_get_width)
- /**
- * If the pmu driver doesn't support counter width, set it to default
- * maximum allowed by the specification.
- */
- cwidth = 63;
- else {
- if (hwc->idx == -1)
- /* Handle init case where idx is not initialized yet */
- cwidth = rvpmu->ctr_get_width(0);
- else
- cwidth = rvpmu->ctr_get_width(hwc->idx);
- }
+ if (hwc->idx == -1)
+ /* Handle init case where idx is not initialized yet */
+ cwidth = rvpmu->ctr_get_width(0);
+ else
+ cwidth = rvpmu->ctr_get_width(hwc->idx);
return GENMASK_ULL(cwidth, 0);
}
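
With every backend now guaranteed to supply ctr_get_width(), the mask construction reduces to a single GENMASK_ULL(). For reference (the widths below are assumed values for illustration):

#include <linux/bits.h>
#include <linux/types.h>

/*
 * Width is reported as the index of the top bit, so a counter that is
 * physically N bits wide reports N - 1 and masks bits [N-1:0].
 */
static u64 ctr_width_mask(int cwidth)
{
        return GENMASK_ULL(cwidth, 0);
        /* cwidth = 63 (legacy cycle/instret) -> 0xffffffffffffffff */
        /* cwidth = 47 (a 48-bit counter)     -> 0x0000ffffffffffff */
}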
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 79fdd667922e..fa0bccf4edf2 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
return pmu_legacy_ctr_get_idx(event);
}
+/* cycle & instret are always 64 bit; per the SBI spec the width is reported as 63 */
+static int pmu_legacy_ctr_get_width(int idx)
+{
+ return 63;
+}
+
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
- pmu->ctr_get_width = NULL;
+ pmu->ctr_get_width = pmu_legacy_ctr_get_width;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
pmu->event_mapped = pmu_legacy_event_mapped;
pmu->event_unmapped = pmu_legacy_event_unmapped;
pmu->csr_index = pmu_legacy_csr_index;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 16acd4dcdb96..452aab49db1e 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_reset_scounteren(void *arg)
@@ -521,7 +521,7 @@ static void pmu_sbi_reset_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -731,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* compute hardware counter index */
hidx = info->csr - CSR_CYCLE;
/* check if the corresponding bit is set in sscountovf */
- if (!(overflow & (1 << hidx)))
+ if (!(overflow & BIT(hidx)))
continue;
/*
* Keep a track of overflowed counters so that they can be started
* with updated initial value.
*/
- overflowed_ctrs |= 1 << lidx;
+ overflowed_ctrs |= BIT(lidx);
hw_evt = &event->hw;
riscv_pmu_event_update(event);
perf_sample_data_init(&data, 0, hw_evt->last_period);
diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
index e625b32889bf..0928a526e2ab 100644
--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
@@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
return ret;
}
- priv->id = of_alias_get_id(np, "mipi_dphy");
+ priv->id = of_alias_get_id(np, "mipi-dphy");
if (priv->id < 0) {
dev_err(dev, "Failed to get phy node alias id: %d\n",
priv->id);
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
index c1a41b6cd29b..b5ac2b7995e7 100644
--- a/drivers/phy/microchip/lan966x_serdes.c
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -96,6 +96,8 @@ static const struct serdes_mux lan966x_serdes_muxes[] = {
SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG,
HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
+ SERDES_MUX_SGMII(SERDES6G(2), 4, 0, 0),
+
SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
HSIO_HW_CFG_RGMII_ENA |
HSIO_HW_CFG_GMII_ENA,
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index a623f092b11f..a43e20abb10d 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -37,56 +37,28 @@
#define EUSB2_TUNE_EUSB_EQU 0x5A
#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
- { \
- .reg = r, \
- .val = v, \
- }
-
-enum reg_fields {
- F_TUNE_EUSB_HS_COMP_CUR,
- F_TUNE_EUSB_EQU,
- F_TUNE_EUSB_SLEW,
- F_TUNE_USB2_HS_COMP_CUR,
- F_TUNE_USB2_PREEM,
- F_TUNE_USB2_EQU,
- F_TUNE_USB2_SLEW,
- F_TUNE_SQUELCH_U,
- F_TUNE_HSDISC,
- F_TUNE_RES_FSDIF,
- F_TUNE_IUSB2,
- F_TUNE_USB2_CROSSOVER,
- F_NUM_TUNE_FIELDS,
-
- F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
- F_FORCE_EN_5,
-
- F_EN_CTL1,
-
- F_RPTR_STATUS,
- F_NUM_FIELDS,
-};
-
-static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
- [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
- [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
- [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
- [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
- [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
- [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
- [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
- [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
- [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
- [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
- [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
- [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
-
- [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
- [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
-
- [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
-
- [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+enum eusb2_reg_layout {
+ TUNE_EUSB_HS_COMP_CUR,
+ TUNE_EUSB_EQU,
+ TUNE_EUSB_SLEW,
+ TUNE_USB2_HS_COMP_CUR,
+ TUNE_USB2_PREEM,
+ TUNE_USB2_EQU,
+ TUNE_USB2_SLEW,
+ TUNE_SQUELCH_U,
+ TUNE_HSDISC,
+ TUNE_RES_FSDIF,
+ TUNE_IUSB2,
+ TUNE_USB2_CROSSOVER,
+ NUM_TUNE_FIELDS,
+
+ FORCE_VAL_5 = NUM_TUNE_FIELDS,
+ FORCE_EN_5,
+
+ EN_CTL1,
+
+ RPTR_STATUS,
+ LAYOUT_SIZE,
};
struct eusb2_repeater_cfg {
@@ -98,10 +70,11 @@ struct eusb2_repeater_cfg {
struct eusb2_repeater {
struct device *dev;
- struct regmap_field *regs[F_NUM_FIELDS];
+ struct regmap *regmap;
struct phy *phy;
struct regulator_bulk_data *vregs;
const struct eusb2_repeater_cfg *cfg;
+ u32 base;
enum phy_mode mode;
};
@@ -109,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = {
"vdd18", "vdd3",
};
-static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
- [F_TUNE_IUSB2] = 0x8,
- [F_TUNE_SQUELCH_U] = 0x3,
- [F_TUNE_USB2_PREEM] = 0x5,
+static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
+ [TUNE_IUSB2] = 0x8,
+ [TUNE_SQUELCH_U] = 0x3,
+ [TUNE_USB2_PREEM] = 0x5,
};
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -140,47 +113,42 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
static int eusb2_repeater_init(struct phy *phy)
{
- struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
struct device_node *np = rptr->dev->of_node;
- u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 };
- u8 override;
+ struct regmap *regmap = rptr->regmap;
+ const u32 *init_tbl = rptr->cfg->init_tbl;
+ u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
+ u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
+ u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
+ u32 base = rptr->base;
u32 val;
int ret;
- int i;
+
+ of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
+ of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
+ of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
if (ret)
return ret;
- regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+ regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
- if (init_tbl[i]) {
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
- } else {
- /* Write 0 if there's no value set */
- u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
-
- regmap_field_update_bits(rptr->regs[i], mask, 0);
- }
- }
- memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl));
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
+ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override))
- init_tbl[F_TUNE_IUSB2] = override;
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
- if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override))
- init_tbl[F_TUNE_HSDISC] = override;
-
- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override))
- init_tbl[F_TUNE_USB2_PREEM] = override;
-
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++)
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-
- ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
- val, val & RPTR_OK, 10, 5);
+ ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
if (ret)
dev_err(rptr->dev, "initialization timed-out\n");
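
regmap_read_poll_timeout() folds the read/check/sleep loop that the regmap_field variant used to provide into one call. Roughly, with the register and status names defined earlier in this driver and the timings the patch keeps:

#include <linux/regmap.h>

/* EUSB2_RPTR_STATUS and RPTR_OK come from the driver's register defines. */
static int wait_repeater_ready(struct regmap *regmap, u32 base)
{
        u32 val;

        /* Re-read every 10 us until RPTR_OK is set or the timeout expires. */
        return regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS,
                                        val, val & RPTR_OK, 10, 5);
}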
@@ -191,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+ struct regmap *regmap = rptr->regmap;
+ u32 base = rptr->base;
switch (mode) {
case PHY_MODE_USB_HOST:
@@ -199,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* per eUSB 1.2 Spec. The software workaround below is applied until
* the PHY and controller are fixed for the observed behavior.
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN);
break;
case PHY_MODE_USB_DEVICE:
/*
@@ -211,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* repeater doesn't clear previous value due to shared
* regulators (say host <-> device mode switch).
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, 0);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0);
break;
default:
return -EINVAL;
@@ -243,9 +209,8 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct device_node *np = dev->of_node;
- struct regmap *regmap;
- int i, ret;
u32 res;
+ int ret;
rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
if (!rptr)
@@ -258,22 +223,15 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
if (!rptr->cfg)
return -EINVAL;
- regmap = dev_get_regmap(dev->parent, NULL);
- if (!regmap)
+ rptr->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!rptr->regmap)
return -ENODEV;
ret = of_property_read_u32(np, "reg", &res);
if (ret < 0)
return ret;
- for (i = 0; i < F_NUM_FIELDS; i++)
- eusb2_repeater_tune_reg_fields[i].reg += res;
-
- ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
- eusb2_repeater_tune_reg_fields,
- F_NUM_FIELDS);
- if (ret)
- return ret;
+ rptr->base = res;
ret = eusb2_repeater_init_vregs(rptr);
if (ret < 0) {
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index c2590579190a..03fb0d4b75d7 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -299,7 +299,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 243cc2b9a0fb..5c003988c35d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1556,6 +1556,14 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
+static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+ .serdes = 0,
+ .pcs = 0x800,
+ .pcs_misc = 0x600,
+ .tx = 0x200,
+ .rx = 0x400,
+};
+
static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
.serdes = 0,
.pcs = 0x800,
@@ -1564,7 +1572,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
.rx = 0x400,
};
-static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = {
.serdes = 0,
.pcs = 0x600,
.tx = 0x200,
@@ -1613,6 +1621,24 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
.rx = 0x1000,
};
+static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_usb_offsets_v3,
+
+ .serdes_tbl = ipq9574_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = ipq8074_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(ipq8074_usb3_rx_tbl),
+ .pcs_tbl = ipq8074_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(ipq8074_usb3_pcs_tbl),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.lanes = 1,
@@ -1652,7 +1678,7 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_v3,
+ .offsets = &qmp_usb_offsets_v3_msm8996,
.serdes_tbl = msm8996_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
@@ -2563,7 +2589,7 @@ err_node_put:
static const struct of_device_id qmp_usb_of_match_table[] = {
{
.compatible = "qcom,ipq6018-qmp-usb3-phy",
- .data = &ipq8074_usb3phy_cfg,
+ .data = &ipq6018_usb3phy_cfg,
}, {
.compatible = "qcom,ipq8074-qmp-usb3-phy",
.data = &ipq8074_usb3phy_cfg,
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index e53eace7c91e..6387c0d34c55 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -673,8 +673,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
- int ret;
-
channel->is_otg_channel = true;
channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
"renesas,no-otg-pins");
@@ -738,8 +736,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
ret = PTR_ERR(provider);
goto error;
} else if (channel->is_otg_channel) {
- int ret;
-
ret = device_create_file(dev, &dev_attr_role);
if (ret < 0)
goto error;
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index dd2913ac0fa2..78e19b128962 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -117,7 +117,7 @@ static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
- if (!phy->comparator)
+ if (!phy->comparator || !phy->comparator->set_vbus)
return -ENODEV;
return phy->comparator->set_vbus(phy->comparator, enabled);
@@ -127,7 +127,7 @@ static int omap_usb_start_srp(struct usb_otg *otg)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
- if (!phy->comparator)
+ if (!phy->comparator || !phy->comparator->start_srp)
return -ENODEV;
return phy->comparator->start_srp(phy->comparator);
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 03ecb3d1aaf6..49f89b70dcec 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
+ IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 1dd84c7a79de..b1995ac268d7 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -1170,7 +1170,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
int ret;
addr = pmc->block[blk_num].mmio_base +
- (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+ ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
ret = mlxbf_pmc_readl(addr, &word);
if (ret)
return ret;
@@ -1413,7 +1413,7 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
int ret;
addr = pmc->block[blk_num].mmio_base +
- (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+ ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
ret = mlxbf_pmc_readl(addr, &word);
if (ret)
return ret;
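
Each PERFSEL register serves a pair of counters, so pairs must land on consecutive registers: (cnt_num / 2) * SZ does that, whereas rounddown(cnt_num, 2) * SZ kept the pair index in counter units and skipped every other register. A standalone comparison (the 8-byte stride is an assumed value for illustration):

#include <stdio.h>

#define PERFSEL_SZ 8    /* assumed register stride */

int main(void)
{
        for (unsigned int cnt = 0; cnt < 6; cnt++) {
                unsigned int old = (cnt & ~1u) * PERFSEL_SZ; /* rounddown(cnt, 2) */
                unsigned int new = (cnt / 2) * PERFSEL_SZ;

                printf("cnt=%u old=%#x new=%#x\n", cnt, old, new);
        }
        /*
         * old: 0, 0, 0x10, 0x10, 0x20, 0x20  -- every other register skipped
         * new: 0, 0, 0x08, 0x08, 0x10, 0x10  -- pairs hit consecutive registers
         */
        return 0;
}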
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index ed16ec422a7b..b8d1e32e97eb 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -47,6 +47,9 @@
/* Message with data needs at least two words (for header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS 2
+/* Tx timeout in milliseconds. */
+#define TMFIFO_TX_TIMEOUT 2000
+
/* ACPI UID for BlueField-3. */
#define TMFIFO_BF3_UID 1
@@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
* @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
+ * @rem_padding: remaining bytes to send as paddings
* @pkt_len: total length of the pending packet
* @next_avail: next avail descriptor id
* @num: vring size (number of descriptors)
* @align: vring alignment size
* @index: vring index
* @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @tx_timeout: expire time of last tx packet
* @fifo: pointer to the tmfifo structure
*/
struct mlxbf_tmfifo_vring {
@@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
struct vring_desc drop_desc;
int cur_len;
int rem_len;
+ int rem_padding;
u32 pkt_len;
u16 next_avail;
int num;
int align;
int index;
int vdev_id;
+ unsigned long tx_timeout;
struct mlxbf_tmfifo *fifo;
};
@@ -819,6 +826,50 @@ mlxbf_tmfifo_desc_done:
return true;
}
+static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
+{
+ unsigned long flags;
+
+ /* Only handle Tx timeout for network vdev. */
+ if (vring->vdev_id != VIRTIO_ID_NET)
+ return;
+
+ /* Initialize the timeout or return if not expired. */
+ if (!vring->tx_timeout) {
+ /* Initialize the timeout. */
+ vring->tx_timeout = jiffies +
+ msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
+ return;
+ } else if (time_before(jiffies, vring->tx_timeout)) {
+ /* Return if not timed out yet. */
+ return;
+ }
+
+ /*
+ * Drop the packet after timeout. The outstanding packet is
+ * released and the remaining bytes will be sent with padding byte 0x00
+ * as a recovery. On the peer(host) side, the padding bytes 0x00 will be
+ * either dropped directly, or appended into existing outstanding packet
+ * thus dropped as corrupted network packet.
+ */
+ vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
+ mlxbf_tmfifo_release_pkt(vring);
+ vring->cur_len = 0;
+ vring->rem_len = 0;
+ vring->fifo->vring[0] = NULL;
+
+ /*
+ * Make sure the load/store are in order before
+ * returning back to virtio.
+ */
+ virtio_mb(false);
+
+ /* Notify upper layer. */
+ spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
+ vring_interrupt(0, vring->vq);
+ spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
+}
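
The lazily armed deadline used above — arm on the first stall, compare with time_before(), clear on any progress — is a standard jiffies idiom. A reduced sketch (hypothetical helper, same 2 s budget):

#include <linux/jiffies.h>
#include <linux/types.h>

#define TX_TIMEOUT_MS   2000

/* Returns true once a stall has outlasted the timeout. */
static bool tx_stalled_too_long(unsigned long *deadline)
{
        if (!*deadline) {
                /* First sign of a stall: arm the deadline. */
                *deadline = jiffies + msecs_to_jiffies(TX_TIMEOUT_MS);
                return false;
        }
        if (time_before(jiffies, *deadline))
                return false;   /* still inside the grace period */
        return true;            /* expired: caller drops the packet */
}

/* On any forward progress the caller re-arms by clearing: *deadline = 0; */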
+
/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
@@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
return;
do {
+retry:
/* Get available FIFO space. */
if (avail == 0) {
if (is_rx)
@@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
break;
}
+ /* Insert paddings for discarded Tx packet. */
+ if (!is_rx) {
+ vring->tx_timeout = 0;
+ while (vring->rem_padding >= sizeof(u64)) {
+ writeq(0, vring->fifo->tx.data);
+ vring->rem_padding -= sizeof(u64);
+ if (--avail == 0)
+ goto retry;
+ }
+ }
+
/* Console output always comes from the Tx buffer. */
if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
mlxbf_tmfifo_console_tx(fifo, avail);
@@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
/* Handle one descriptor. */
more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
} while (more);
+
+ /* Check Tx timeout. */
+ if (avail <= 0 && !is_rx)
+ mlxbf_tmfifo_check_tx_timeout(vring);
}
/* Handle Rx or Tx queues. */
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index f246252bddd8..f4fa8bd8bda8 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -10,6 +10,7 @@ config AMD_PMF
depends on AMD_NB
select ACPI_PLATFORM_PROFILE
depends on TEE && AMDTEE
+ depends on AMD_SFH_HID
help
This driver provides support for the AMD Platform Management Framework.
The goal is to enhance end user experience by making AMD PCs smarter,
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index feaa09f5b35a..4f734e049f4a 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -296,7 +296,8 @@ static int amd_pmf_suspend_handler(struct device *dev)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- kfree(pdev->buf);
+ if (pdev->smart_pc_enabled)
+ cancel_delayed_work_sync(&pdev->pb_work);
return 0;
}
@@ -312,6 +313,9 @@ static int amd_pmf_resume_handler(struct device *dev)
return ret;
}
+ if (pdev->smart_pc_enabled)
+ schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
+
return 0;
}
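The suspend/resume pairing above is a common shape; a hypothetical reduction (the my_* names and the 2000 ms settle delay are illustrative, not the driver's API): cancel the worker synchronously before the device sleeps, and restart it with a delay on resume so the first poll does not read stale firmware data.

#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	bool feature_enabled;
	struct delayed_work work;
};

static int my_suspend(struct device *dev)
{
	struct my_dev *d = dev_get_drvdata(dev);

	if (d->feature_enabled)
		cancel_delayed_work_sync(&d->work); /* waits for a running item */
	return 0;
}

static int my_resume(struct device *dev)
{
	struct my_dev *d = dev_get_drvdata(dev);

	if (d->feature_enabled)
		schedule_delayed_work(&d->work, msecs_to_jiffies(2000));
	return 0;
}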
@@ -330,9 +334,14 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
- if (!amd_pmf_init_smart_pc(dev)) {
+ amd_pmf_init_smart_pc(dev);
+ if (dev->smart_pc_enabled) {
dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
- } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ /* If Smart PC is enabled, no need to check for other features */
+ return;
+ }
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_init_auto_mode(dev);
dev_dbg(dev->dev, "Auto Mode Init done\n");
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
@@ -351,7 +360,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
amd_pmf_deinit_sps(dev);
}
- if (!dev->smart_pc_enabled) {
+ if (dev->smart_pc_enabled) {
amd_pmf_deinit_smart_pc(dev);
} else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 16999c5b334f..66cae1cca73c 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -441,11 +441,6 @@ struct apmf_dyn_slider_output {
struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
} __packed;
-enum smart_pc_status {
- PMF_SMART_PC_ENABLED,
- PMF_SMART_PC_DISABLED,
-};
-
/* Smart PC - TA internals */
enum system_state {
SYSTEM_STATE_S0i3,
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index a0423942f771..a3dec14c3004 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -10,6 +10,7 @@
*/
#include <acpi/button.h>
+#include <linux/amd-pmf-io.h>
#include <linux/power_supply.h>
#include <linux/units.h>
#include "pmf.h"
@@ -44,6 +45,8 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency);
dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy);
dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
+ dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
+ dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
dev_dbg(dev->dev, "==== TA inputs END ====\n");
}
#else
@@ -147,6 +150,38 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
return 0;
}
+static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+ struct amd_sfh_info sfh_info;
+ int ret;
+
+ /* Get ALS data */
+ ret = amd_get_sfh_info(&sfh_info, MT_ALS);
+ if (!ret)
+ in->ev_info.ambient_light = sfh_info.ambient_light;
+ else
+ return ret;
+
+ /* get HPD data */
+ ret = amd_get_sfh_info(&sfh_info, MT_HPD);
+ if (ret)
+ return ret;
+
+ switch (sfh_info.user_present) {
+ case SFH_NOT_DETECTED:
+ in->ev_info.user_present = 0xff; /* assume no sensors connected */
+ break;
+ case SFH_USER_PRESENT:
+ in->ev_info.user_present = 1;
+ break;
+ case SFH_USER_AWAY:
+ in->ev_info.user_present = 0;
+ break;
+ }
+
+ return 0;
+}
+
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
{
/* TA side lid open is 1 and close is 0, hence the ! here */
@@ -155,4 +190,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
amd_pmf_get_smu_info(dev, in);
amd_pmf_get_battery_info(dev, in);
amd_pmf_get_slider_info(dev, in);
+ amd_pmf_get_sensor_info(dev, in);
}
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 502ce93d5cdd..dcbe8f85e122 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -252,15 +252,17 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
cookie = readl(dev->policy_buf + POLICY_COOKIE_OFFSET);
length = readl(dev->policy_buf + POLICY_COOKIE_LEN);
- if (cookie != POLICY_SIGN_COOKIE || !length)
+ if (cookie != POLICY_SIGN_COOKIE || !length) {
+ dev_dbg(dev->dev, "cookie doesn't match\n");
return -EINVAL;
+ }
/* Update the actual length */
dev->policy_sz = length + 512;
res = amd_pmf_invoke_cmd_init(dev);
if (res == TA_PMF_TYPE_SUCCESS) {
/* Now it's safe to announce that smart pc is enabled */
- dev->smart_pc_enabled = PMF_SMART_PC_ENABLED;
+ dev->smart_pc_enabled = true;
/*
* Start collecting the data from TA FW after a small delay
* or else, we might end up getting stale values.
@@ -268,7 +270,7 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
- dev->smart_pc_enabled = PMF_SMART_PC_DISABLED;
+ dev->smart_pc_enabled = false;
return res;
}
@@ -298,8 +300,10 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
if (!new_policy_buf)
return -ENOMEM;
- if (copy_from_user(new_policy_buf, buf, length))
+ if (copy_from_user(new_policy_buf, buf, length)) {
+ kfree(new_policy_buf);
return -EFAULT;
+ }
kfree(dev->policy_buf);
dev->policy_buf = new_policy_buf;
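The kfree() added here closes a classic copy_from_user() leak. A generic form of the corrected pattern, with a hypothetical helper name: the freshly allocated buffer must be freed when the copy fails, and the old buffer is replaced only on success.

#include <linux/slab.h>
#include <linux/uaccess.h>

static int my_replace_buf(void **dst, const void __user *src, size_t len)
{
	void *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, src, len)) {
		kfree(buf);	/* without this, -EFAULT leaks the buffer */
		return -EFAULT;
	}

	kfree(*dst);		/* release the old buffer only on success */
	*dst = buf;
	return 0;
}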
@@ -334,25 +338,6 @@ static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {}
static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {}
#endif
-static int amd_pmf_get_bios_buffer(struct amd_pmf_dev *dev)
-{
- dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
- if (!dev->policy_buf)
- return -ENOMEM;
-
- dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
- if (!dev->policy_base)
- return -ENOMEM;
-
- memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
-
- amd_pmf_hex_dump_pb(dev);
- if (pb_side_load)
- amd_pmf_open_pb(dev, dev->dbgfs_dir);
-
- return amd_pmf_start_policy_engine(dev);
-}
-
static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data)
{
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
@@ -451,22 +436,59 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return ret;
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
- amd_pmf_set_dram_addr(dev, true);
- amd_pmf_get_bios_buffer(dev);
+
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ goto error;
+
+ dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
+ if (!dev->policy_base) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
+
+ amd_pmf_hex_dump_pb(dev);
+
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
- if (!dev->prev_data)
- return -ENOMEM;
+ if (!dev->prev_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = amd_pmf_start_policy_engine(dev);
+ if (ret)
+ goto error;
+
+ if (pb_side_load)
+ amd_pmf_open_pb(dev, dev->dbgfs_dir);
+
+ return 0;
- return dev->smart_pc_enabled;
+error:
+ amd_pmf_deinit_smart_pc(dev);
+
+ return ret;
}
void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
{
- if (pb_side_load)
+ if (pb_side_load && dev->esbin)
amd_pmf_remove_pb(dev);
+ cancel_delayed_work_sync(&dev->pb_work);
kfree(dev->prev_data);
+ dev->prev_data = NULL;
kfree(dev->policy_buf);
- cancel_delayed_work_sync(&dev->pb_work);
+ dev->policy_buf = NULL;
+ kfree(dev->buf);
+ dev->buf = NULL;
amd_pmf_tee_deinit(dev);
}
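The reworked init path pairs a single error label with a deinit routine that is safe to run on partially initialized state. A reduced sketch under hypothetical my_* names: every failure path funnels into one label, and deinit NULLs each pointer after freeing it so a later (or double) deinit is harmless.

#include <linux/slab.h>

struct my_dev {
	void *policy_buf;
	void *prev_data;
	size_t policy_sz;
};

static void my_deinit(struct my_dev *d)
{
	kfree(d->policy_buf);
	d->policy_buf = NULL;		/* kfree(NULL) is a no-op next time */
	kfree(d->prev_data);
	d->prev_data = NULL;
}

static int my_init(struct my_dev *d)
{
	int ret;

	d->policy_buf = kzalloc(d->policy_sz, GFP_KERNEL);
	if (!d->policy_buf) {
		ret = -ENOMEM;
		goto error;
	}

	d->prev_data = kzalloc(64, GFP_KERNEL);
	if (!d->prev_data) {
		ret = -ENOMEM;
		goto error;
	}

	return 0;

error:
	my_deinit(d);			/* tolerates NULL members */
	return ret;
}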
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index a1ee1a74fc3c..2cf3b4a8813f 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -399,7 +399,8 @@ int ifs_load_firmware(struct device *dev)
if (fw->size != expected_size) {
dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
expected_size, fw->size);
- return -EINVAL;
+ ret = -EINVAL;
+ goto release;
}
ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index b6708bab7c53..527d8fbc7cc1 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev)
* IRQs into gpiolib.
*/
ret = devm_request_irq(dev, irq, int0002_irq,
- IRQF_SHARED, "INT0002", chip);
+ IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
return ret;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 33ab207493e3..33bb58dc3f78 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -23,23 +23,23 @@ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned
static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
-static ssize_t show_domain_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_dev_attr);
+ struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
return sprintf(buf, "%u\n", data->domain_id);
}
-static ssize_t show_fabric_cluster_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_dev_attr);
+ struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
return sprintf(buf, "%u\n", data->cluster_id);
}
-static ssize_t show_package_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- struct uncore_data *data = container_of(attr, struct uncore_data, package_id_dev_attr);
+ struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
return sprintf(buf, "%u\n", data->package_id);
}
@@ -97,30 +97,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
}
#define store_uncore_min_max(name, min_max) \
- static ssize_t store_##name(struct device *dev, \
- struct device_attribute *attr, \
+ static ssize_t store_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
return store_min_max_freq_khz(data, buf, count, \
min_max); \
}
#define show_uncore_min_max(name, min_max) \
- static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf)\
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
{ \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
return show_min_max_freq_khz(data, buf, min_max); \
}
#define show_uncore_perf_status(name) \
- static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf)\
+ static ssize_t show_##name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
{ \
- struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
\
return show_perf_status_freq_khz(data, buf); \
}
@@ -134,11 +134,11 @@ show_uncore_min_max(max_freq_khz, 1);
show_uncore_perf_status(current_freq_khz);
#define show_uncore_data(member_name) \
- static ssize_t show_##member_name(struct device *dev, \
- struct device_attribute *attr, char *buf)\
+ static ssize_t show_##member_name(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf)\
{ \
struct uncore_data *data = container_of(attr, struct uncore_data,\
- member_name##_dev_attr);\
+ member_name##_kobj_attr);\
\
return sysfs_emit(buf, "%u\n", \
data->member_name); \
@@ -149,29 +149,29 @@ show_uncore_data(initial_max_freq_khz);
#define init_attribute_rw(_name) \
do { \
- sysfs_attr_init(&data->_name##_dev_attr.attr); \
- data->_name##_dev_attr.show = show_##_name; \
- data->_name##_dev_attr.store = store_##_name; \
- data->_name##_dev_attr.attr.name = #_name; \
- data->_name##_dev_attr.attr.mode = 0644; \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = store_##_name; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0644; \
} while (0)
#define init_attribute_ro(_name) \
do { \
- sysfs_attr_init(&data->_name##_dev_attr.attr); \
- data->_name##_dev_attr.show = show_##_name; \
- data->_name##_dev_attr.store = NULL; \
- data->_name##_dev_attr.attr.name = #_name; \
- data->_name##_dev_attr.attr.mode = 0444; \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0444; \
} while (0)
#define init_attribute_root_ro(_name) \
do { \
- sysfs_attr_init(&data->_name##_dev_attr.attr); \
- data->_name##_dev_attr.show = show_##_name; \
- data->_name##_dev_attr.store = NULL; \
- data->_name##_dev_attr.attr.name = #_name; \
- data->_name##_dev_attr.attr.mode = 0400; \
+ sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+ data->_name##_kobj_attr.show = show_##_name; \
+ data->_name##_kobj_attr.store = NULL; \
+ data->_name##_kobj_attr.attr.name = #_name; \
+ data->_name##_kobj_attr.attr.mode = 0400; \
} while (0)
static int create_attr_group(struct uncore_data *data, char *name)
@@ -186,21 +186,21 @@ static int create_attr_group(struct uncore_data *data, char *name)
if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
init_attribute_root_ro(domain_id);
- data->uncore_attrs[index++] = &data->domain_id_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
init_attribute_root_ro(fabric_cluster_id);
- data->uncore_attrs[index++] = &data->fabric_cluster_id_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
init_attribute_root_ro(package_id);
- data->uncore_attrs[index++] = &data->package_id_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
}
- data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
+ data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
ret = uncore_read_freq(data, &freq);
if (!ret)
- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+ data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
data->uncore_attrs[index] = NULL;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
index 7afb69977c7e..0e5bf507e555 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
@@ -26,14 +26,14 @@
* @instance_id: Unique instance id to append to directory name
* @name: Sysfs entry name for this instance
* @uncore_attr_group: Attribute group storage
- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
- * @domain_id_dev_attr: Storage for device attribute domain_id
- * @fabric_cluster_id_dev_attr: Storage for device attribute fabric_cluster_id
- * @package_id_dev_attr: Storage for device attribute package_id
+ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
+ * @mix_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
+ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
+ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
+ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
+ * @package_id_kobj_attr: Storage for kobject attribute package_id
* @uncore_attrs: Attribute storage for group creation
*
* This structure is used to encapsulate all data related to uncore sysfs
@@ -53,14 +53,14 @@ struct uncore_data {
char name[32];
struct attribute_group uncore_attr_group;
- struct device_attribute max_freq_khz_dev_attr;
- struct device_attribute min_freq_khz_dev_attr;
- struct device_attribute initial_max_freq_khz_dev_attr;
- struct device_attribute initial_min_freq_khz_dev_attr;
- struct device_attribute current_freq_khz_dev_attr;
- struct device_attribute domain_id_dev_attr;
- struct device_attribute fabric_cluster_id_dev_attr;
- struct device_attribute package_id_dev_attr;
+ struct kobj_attribute max_freq_khz_kobj_attr;
+ struct kobj_attribute min_freq_khz_kobj_attr;
+ struct kobj_attribute initial_max_freq_khz_kobj_attr;
+ struct kobj_attribute initial_min_freq_khz_kobj_attr;
+ struct kobj_attribute current_freq_khz_kobj_attr;
+ struct kobj_attribute domain_id_kobj_attr;
+ struct kobj_attribute fabric_cluster_id_kobj_attr;
+ struct kobj_attribute package_id_kobj_attr;
struct attribute *uncore_attrs[9];
};
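The conversion in this file follows from the show/store prototypes: attributes attached to a bare kobject are dispatched as struct kobj_attribute, so storing them as struct device_attribute makes sysfs call the handlers through a mismatched function type. A minimal sketch of the corrected pattern, with a hypothetical my_data struct:

#include <linux/kobject.h>
#include <linux/sysfs.h>

struct my_data {
	unsigned int value;
	struct kobj_attribute value_kobj_attr;
};

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct my_data *data = container_of(attr, struct my_data,
					    value_kobj_attr);

	return sysfs_emit(buf, "%u\n", data->value);
}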
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 210b0a81b7ec..084c355c86f5 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
sparse_keymap_report_event(input_dev, event, val, autorelease);
-
- /* Some devices need this to report further events */
- acpi_evaluate_object(handle, "VBDL", NULL, NULL);
}
/*
diff --git a/drivers/platform/x86/intel/wmi/sbl-fw-update.c b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
index 9cf5ed0f8dc2..040153ad67c1 100644
--- a/drivers/platform/x86/intel/wmi/sbl-fw-update.c
+++ b/drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -32,7 +32,7 @@ static int get_fwu_request(struct device *dev, u32 *out)
return -ENODEV;
if (obj->type != ACPI_TYPE_INTEGER) {
- dev_warn(dev, "wmi_query_block returned invalid value\n");
+ dev_warn(dev, "wmidev_block_query returned invalid value\n");
kfree(obj);
return -EINVAL;
}
@@ -55,7 +55,7 @@ static int set_fwu_request(struct device *dev, u32 in)
status = wmidev_block_set(to_wmi_device(dev), 0, &input);
if (ACPI_FAILURE(status)) {
- dev_err(dev, "wmi_set_block failed\n");
+ dev_err(dev, "wmidev_block_set failed\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 1cf2471d54dd..3d66e1d4eb1f 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -20,12 +20,29 @@
#define P2SBC_HIDE BIT(8)
#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
+#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0)
+#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2)
static const struct x86_cpu_id p2sb_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
{}
};
+/*
+ * Cache BAR0 of P2SB device functions 0 to 7.
+ * TODO: The constant 8 is the number of functions that PCI specification
+ * defines. Same definitions exist tree-wide. Unify this definition and
+ * the other definitions then move to include/uapi/linux/pci.h.
+ */
+#define NR_P2SB_RES_CACHE 8
+
+struct p2sb_res_cache {
+ u32 bus_dev_id;
+ struct resource res;
+};
+
+static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
+
static int p2sb_get_devfn(unsigned int *devfn)
{
unsigned int fn = P2SB_DEVFN_DEFAULT;
@@ -39,10 +56,18 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
+static bool p2sb_valid_resource(struct resource *res)
+{
+ if (res->flags)
+ return true;
+
+ return false;
+}
+
/* Copy resource from the first BAR of the device in question */
-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
- struct resource *bar0 = &pdev->resource[0];
+ struct resource *bar0 = pci_resource_n(pdev, 0);
/* Make sure we have no dangling pointers in the output */
memset(mem, 0, sizeof(*mem));
@@ -56,49 +81,57 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
mem->end = bar0->end;
mem->flags = bar0->flags;
mem->desc = bar0->desc;
-
- return 0;
}
-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
{
+ struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
struct pci_dev *pdev;
- int ret;
pdev = pci_scan_single_device(bus, devfn);
if (!pdev)
- return -ENODEV;
+ return;
- ret = p2sb_read_bar0(pdev, mem);
+ p2sb_read_bar0(pdev, &cache->res);
+ cache->bus_dev_id = bus->dev.id;
pci_stop_and_remove_bus_device(pdev);
- return ret;
}
-/**
- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
- * @bus: PCI bus to communicate with
- * @devfn: PCI slot and function to communicate with
- * @mem: memory resource to be filled in
- *
- * The BIOS prevents the P2SB device from being enumerated by the PCI
- * subsystem, so we need to unhide and hide it back to lookup the BAR.
- *
- * if @bus is NULL, the bus 0 in domain 0 will be used.
- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
- *
- * Caller must provide a valid pointer to @mem.
- *
- * Locking is handled by pci_rescan_remove_lock mutex.
- *
- * Return:
- * 0 on success or appropriate errno value on error.
- */
-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+{
+ /* Scan the P2SB device and cache its BAR0 */
+ p2sb_scan_and_cache_devfn(bus, devfn);
+
+ /* On Goldmont p2sb_bar() also gets called for the SPI controller */
+ if (devfn == P2SB_DEVFN_GOLDMONT)
+ p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
+
+ if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+ return -ENOENT;
+
+ return 0;
+}
+
+static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
+{
+ static struct pci_bus *p2sb_bus;
+
+ bus = bus ?: p2sb_bus;
+ if (bus)
+ return bus;
+
+ /* Assume P2SB is on the bus 0 in domain 0 */
+ p2sb_bus = pci_find_bus(0, 0);
+ return p2sb_bus;
+}
+
+static int p2sb_cache_resources(void)
{
- struct pci_dev *pdev_p2sb;
unsigned int devfn_p2sb;
u32 value = P2SBC_HIDE;
+ struct pci_bus *bus;
+ u16 class;
int ret;
/* Get devfn for P2SB device itself */
@@ -106,8 +139,17 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
if (ret)
return ret;
- /* if @bus is NULL, use bus 0 in domain 0 */
- bus = bus ?: pci_find_bus(0, 0);
+ bus = p2sb_get_bus(NULL);
+ if (!bus)
+ return -ENODEV;
+
+ /*
+ * When a device with the same devfn exists but its device class is not
+ * PCI_CLASS_MEMORY_OTHER, as expected for P2SB, do not touch it.
+ */
+ pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
+ if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
+ return -ENODEV;
/*
* Prevent concurrent PCI bus scan from seeing the P2SB device and
@@ -115,17 +157,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
*/
pci_lock_rescan_remove();
- /* Unhide the P2SB device, if needed */
+ /*
+ * The BIOS prevents the P2SB device from being enumerated by the PCI
+ * subsystem, so we need to unhide it and then hide it again to look
+ * up the BAR. Unhide the P2SB device here, if needed.
+ */
pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
if (value & P2SBC_HIDE)
pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
- pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
- if (devfn)
- ret = p2sb_scan_and_read(bus, devfn, mem);
- else
- ret = p2sb_read_bar0(pdev_p2sb, mem);
- pci_stop_and_remove_bus_device(pdev_p2sb);
+ ret = p2sb_scan_and_cache(bus, devfn_p2sb);
/* Hide the P2SB device, if it was hidden */
if (value & P2SBC_HIDE)
@@ -133,12 +174,62 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
pci_unlock_rescan_remove();
- if (ret)
- return ret;
+ return ret;
+}
- if (mem->flags == 0)
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * If @bus is NULL, the bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ struct p2sb_res_cache *cache;
+ int ret;
+
+ bus = p2sb_get_bus(bus);
+ if (!bus)
+ return -ENODEV;
+
+ if (!devfn) {
+ ret = p2sb_get_devfn(&devfn);
+ if (ret)
+ return ret;
+ }
+
+ cache = &p2sb_resources[PCI_FUNC(devfn)];
+ if (cache->bus_dev_id != bus->dev.id)
return -ENODEV;
+ if (!p2sb_valid_resource(&cache->res))
+ return -ENOENT;
+
+ memcpy(mem, &cache->res, sizeof(*mem));
return 0;
}
EXPORT_SYMBOL_GPL(p2sb_bar);
+
+static int __init p2sb_fs_init(void)
+{
+ p2sb_cache_resources();
+ return 0;
+}
+
+/*
+ * pci_rescan_remove_lock, which must be held while accessing the
+ * unhidden P2SB devices, cannot be taken in the sysfs PCI bus rescan
+ * path without risking a deadlock. To avoid the deadlock, access the
+ * P2SB devices with the lock held at an early step of kernel
+ * initialization and cache the required resources. This should happen
+ * after subsys_initcall, which initializes the PCI subsystem, and
+ * before device_initcall, which requires the P2SB resources.
+ */
+fs_initcall(p2sb_fs_init);
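The initcall placement is the crux; a stripped-down, hypothetical version of the idiom (my_probe_bar() is a stand-in for the real scan-and-read): do the lock-heavy probing once at fs_initcall time and let later callers consume the cached result without taking pci_rescan_remove_lock.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>

static struct resource my_cached_bar;

static void my_probe_bar(struct resource *res)
{
	/* stand-in for the real scan-and-read of BAR0 */
	res->flags = IORESOURCE_MEM;
}

static int __init my_cache_init(void)
{
	pci_lock_rescan_remove();
	my_probe_bar(&my_cached_bar);
	pci_unlock_rescan_remove();
	return 0;
}
/* Runs after subsys_initcall (PCI is up), before device_initcall (consumers). */
fs_initcall(my_cache_init);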
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
new file mode 100644
index 000000000000..bcf3a0c356ea
--- /dev/null
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * In some cases UART attached devices which require an in kernel driver,
+ * e.g. UART attached Bluetooth HCIs are described in the ACPI tables
+ * by an ACPI device with a broken or missing UartSerialBusV2() resource.
+ *
+ * This causes the kernel to create a /dev/ttyS# char-device for the UART
+ * instead of creating an in kernel serdev-controller + serdev-device pair
+ * for the in kernel driver.
+ *
+ * The quirk handling in acpi_quirk_skip_serdev_enumeration() makes the kernel
+ * create a serdev-controller device for these UARTs instead of a /dev/ttyS#.
+ *
+ * Instantiating the actual serdev-device to bind to is up to pdx86 code,
+ * this header provides a helper for getting the serdev-controller device.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/printk.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct device *ctrl_dev, *child;
+ struct acpi_device *ctrl_adev;
+ char name[32];
+ int i;
+
+ ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!ctrl_adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ /* Walk host -> uart-ctrl -> port -> serdev-ctrl */
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ snprintf(name, sizeof(name), "%s:0", dev_name(ctrl_dev));
+ break;
+ case 1:
+ snprintf(name, sizeof(name), "%s.%d",
+ dev_name(ctrl_dev), serial_ctrl_port);
+ break;
+ case 2:
+ strscpy(name, serdev_ctrl_name, sizeof(name));
+ break;
+ }
+
+ child = device_find_child_by_name(ctrl_dev, name);
+ put_device(ctrl_dev);
+ if (!child) {
+ pr_err("error could not find '%s' device\n", name);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ ctrl_dev = child;
+ }
+
+put_ctrl_adev:
+ acpi_dev_put(ctrl_adev);
+ return ctrl_dev;
+}
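A hypothetical caller of the new helper (the HID, UID, and controller name below are illustrative, not taken from the patch): resolve the serdev controller on port 0 of an ACPI-described UART, then drop the reference when done.

/* assumes "serdev_helpers.h" is included */
static int my_attach_serdev(void)
{
	struct device *ctrl;

	ctrl = get_serdev_controller("8086228A", "1", 0, "serial0");
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	/* ... e.g. serdev_device_alloc(to_serdev_controller(ctrl)) ... */

	put_device(ctrl);	/* the helper returned a strong reference */
	return 0;
}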
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 3a396b763c49..ce3e08815a8e 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -1009,7 +1009,16 @@ static ssize_t current_value_store(struct kobject *kobj,
* Note - this sets the variable and then the password as separate
* WMI calls. Function tlmi_save_bios_settings will error if the
* password is incorrect.
+ * Workstations require the opcode to be set before changing the
+ * attribute.
*/
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+ tlmi_priv.pwd_admin->password);
+ if (ret)
+ goto out;
+ }
+
set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
new_setting);
if (!set_str) {
@@ -1021,17 +1030,10 @@ static ssize_t current_value_store(struct kobject *kobj,
if (ret)
goto out;
- if (tlmi_priv.save_mode == TLMI_SAVE_BULK) {
+ if (tlmi_priv.save_mode == TLMI_SAVE_BULK)
tlmi_priv.save_required = true;
- } else {
- if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
- ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
- tlmi_priv.pwd_admin->password);
- if (ret)
- goto out;
- }
+ else
ret = tlmi_save_bios_settings("");
- }
} else { /* old non-opcode based authentication method (deprecated) */
if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c4895e9bc714..5ecd9d33250d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10308,6 +10308,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
return 0;
default:
/* Unknown function */
+ pr_debug("unknown function 0x%x\n", funcmode);
return -EOPNOTSUPP;
}
return 0;
@@ -10493,8 +10494,8 @@ static void dytc_profile_refresh(void)
return;
perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
- convert_dytc_to_profile(funcmode, perfmode, &profile);
- if (profile != dytc_current_profile) {
+ err = convert_dytc_to_profile(funcmode, perfmode, &profile);
+ if (!err && profile != dytc_current_profile) {
dytc_current_profile = profile;
platform_profile_notify();
}
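The hunk above follows a general rule worth spelling out: never consume an out-parameter unless the producing call succeeded. A reduced, hypothetical form (my_convert() is a stand-in declaration for illustration):

#include <linux/errno.h>

enum my_profile { MY_PROFILE_LOW, MY_PROFILE_BALANCED, MY_PROFILE_PERF };

/* hypothetical converter: fills *out and returns 0, or -EOPNOTSUPP */
static int my_convert(int funcmode, int perfmode, enum my_profile *out);

static void my_profile_refresh(int funcmode, int perfmode,
			       enum my_profile *current)
{
	enum my_profile profile;
	int err;

	err = my_convert(funcmode, perfmode, &profile);
	if (err)
		return;		/* "profile" is unset; do not use it */

	if (profile != *current)
		*current = profile;
}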
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 0c6733772698..975cf24ae359 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
};
static const struct ts_dmi_data chuwi_hi8_air_data = {
- .acpi_name = "MSSL1680:00",
+ .acpi_name = "MSSL1680",
.properties = chuwi_hi8_air_props,
};
@@ -415,18 +415,13 @@ static const struct property_entry gdix1001_upside_down_props[] = {
{ }
};
-static const struct ts_dmi_data gdix1001_00_upside_down_data = {
- .acpi_name = "GDIX1001:00",
- .properties = gdix1001_upside_down_props,
-};
-
-static const struct ts_dmi_data gdix1001_01_upside_down_data = {
- .acpi_name = "GDIX1001:01",
+static const struct ts_dmi_data gdix1001_upside_down_data = {
+ .acpi_name = "GDIX1001",
.properties = gdix1001_upside_down_props,
};
-static const struct ts_dmi_data gdix1002_00_upside_down_data = {
- .acpi_name = "GDIX1002:00",
+static const struct ts_dmi_data gdix1002_upside_down_data = {
+ .acpi_name = "GDIX1002",
.properties = gdix1001_upside_down_props,
};
@@ -944,6 +939,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
.properties = teclast_tbook11_props,
};
+static const struct property_entry teclast_x16_plus_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data teclast_x16_plus_data = {
+ .embedded_fw = {
+ .name = "silead/gsl3692-teclast-x16-plus.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 43560,
+ .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+ 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+ 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+ 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = teclast_x16_plus_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1386,7 +1407,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Juno Tablet */
- .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .driver_data = (void *)&gdix1002_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
@@ -1613,6 +1634,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Teclast X16 Plus */
+ .driver_data = (void *)&teclast_x16_plus_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
@@ -1623,7 +1653,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Android version / BIOS) */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
@@ -1631,7 +1661,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Windows version / BIOS) */
- .driver_data = (void *)&gdix1001_01_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/* tPAD is too generic, also match on bios date */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
@@ -1649,7 +1679,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X98 Pro */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/*
* Only match BIOS date, because the manufacturers
@@ -1753,7 +1783,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* "WinBook TW100" */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
@@ -1761,7 +1791,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* WinBook TW700 */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
@@ -1786,7 +1816,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
int error;
if (has_acpi_companion(dev) &&
- !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+ strstarts(client->name, ts_data->acpi_name)) {
error = device_create_managed_software_node(dev, ts_data->properties, NULL);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
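The switch from strncmp() to strstarts() is what allows the merged table entries above: a prefix match covers every ACPI instance suffix, e.g. both "GDIX1001:00" and "GDIX1001:01". A trivial illustration:

#include <linux/string.h>
#include <linux/types.h>

static bool ts_acpi_name_matches(const char *client_name, const char *acpi_name)
{
	/* true for "GDIX1001:00", "GDIX1001:01", ... given "GDIX1001" */
	return strstarts(client_name, acpi_name);
}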
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index bd271a5730aa..3c288e8f404b 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
@@ -56,7 +57,6 @@ static_assert(__alignof__(struct guid_block) == 1);
enum { /* wmi_block flags */
WMI_READ_TAKES_NO_ARGS,
- WMI_PROBED,
};
struct wmi_block {
@@ -64,8 +64,10 @@ struct wmi_block {
struct list_head list;
struct guid_block gblock;
struct acpi_device *acpi_device;
+ struct rw_semaphore notify_lock; /* Protects notify callback add/remove */
wmi_notify_handler handler;
void *handler_data;
+ bool driver_ready;
unsigned long flags;
};
@@ -219,6 +221,17 @@ static int wmidev_match_guid(struct device *dev, const void *data)
return 0;
}
+static int wmidev_match_notify_id(struct device *dev, const void *data)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ const u32 *notify_id = data;
+
+ if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
+ return 1;
+
+ return 0;
+}
+
static struct bus_type wmi_bus_type;
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
@@ -238,6 +251,17 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
return dev_to_wdev(dev);
}
+static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&wmi_bus_type, NULL, &notify_id, wmidev_match_notify_id);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wmi_device(dev);
+}
+
static void wmi_device_put(struct wmi_device *wdev)
{
put_device(&wdev->dev);
@@ -572,32 +596,31 @@ acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler,
void *data)
{
- struct wmi_block *block;
- acpi_status status = AE_NOT_EXIST;
- guid_t guid_input;
-
- if (!guid || !handler)
- return AE_BAD_PARAMETER;
+ struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- if (guid_parse(guid, &guid_input))
- return AE_BAD_PARAMETER;
+ wdev = wmi_find_device_by_guid(guid);
+ if (IS_ERR(wdev))
+ return AE_ERROR;
- list_for_each_entry(block, &wmi_block_list, list) {
- acpi_status wmi_status;
+ wblock = container_of(wdev, struct wmi_block, dev);
- if (guid_equal(&block->gblock.guid, &guid_input)) {
- if (block->handler)
- return AE_ALREADY_ACQUIRED;
+ down_write(&wblock->notify_lock);
+ if (wblock->handler) {
+ status = AE_ALREADY_ACQUIRED;
+ } else {
+ wblock->handler = handler;
+ wblock->handler_data = data;
- block->handler = handler;
- block->handler_data = data;
+ if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ dev_warn(&wblock->dev.dev, "Failed to enable device\n");
- wmi_status = wmi_method_enable(block, true);
- if ((wmi_status != AE_OK) ||
- ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
- status = wmi_status;
- }
+ status = AE_OK;
}
+ up_write(&wblock->notify_lock);
+
+ wmi_device_put(wdev);
return status;
}
@@ -613,30 +636,31 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
*/
acpi_status wmi_remove_notify_handler(const char *guid)
{
- struct wmi_block *block;
- acpi_status status = AE_NOT_EXIST;
- guid_t guid_input;
+ struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- if (!guid)
- return AE_BAD_PARAMETER;
+ wdev = wmi_find_device_by_guid(guid);
+ if (IS_ERR(wdev))
+ return AE_ERROR;
- if (guid_parse(guid, &guid_input))
- return AE_BAD_PARAMETER;
+ wblock = container_of(wdev, struct wmi_block, dev);
- list_for_each_entry(block, &wmi_block_list, list) {
- acpi_status wmi_status;
+ down_write(&wblock->notify_lock);
+ if (!wblock->handler) {
+ status = AE_NULL_ENTRY;
+ } else {
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ dev_warn(&wblock->dev.dev, "Failed to disable device\n");
- if (guid_equal(&block->gblock.guid, &guid_input)) {
- if (!block->handler)
- return AE_NULL_ENTRY;
+ wblock->handler = NULL;
+ wblock->handler_data = NULL;
- wmi_status = wmi_method_enable(block, false);
- block->handler = NULL;
- block->handler_data = NULL;
- if (wmi_status != AE_OK || (wmi_status == AE_OK && status == AE_NOT_EXIST))
- status = wmi_status;
- }
+ status = AE_OK;
}
+ up_write(&wblock->notify_lock);
+
+ wmi_device_put(wdev);
return status;
}
@@ -655,15 +679,19 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
{
struct wmi_block *wblock;
+ struct wmi_device *wdev;
+ acpi_status status;
- list_for_each_entry(wblock, &wmi_block_list, list) {
- struct guid_block *gblock = &wblock->gblock;
+ wdev = wmi_find_event_by_notify_id(event);
+ if (IS_ERR(wdev))
+ return AE_NOT_FOUND;
- if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
- return get_event_data(wblock, out);
- }
+ wblock = container_of(wdev, struct wmi_block, dev);
+ status = get_event_data(wblock, out);
- return AE_NOT_FOUND;
+ wmi_device_put(wdev);
+
+ return status;
}
EXPORT_SYMBOL_GPL(wmi_get_event_data);
@@ -868,7 +896,7 @@ static int wmi_dev_probe(struct device *dev)
if (wdriver->probe) {
ret = wdriver->probe(dev_to_wdev(dev),
find_guid_context(wblock, wdriver));
- if (!ret) {
+ if (ret) {
if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
dev_warn(dev, "Failed to disable device\n");
@@ -876,7 +904,9 @@ static int wmi_dev_probe(struct device *dev)
}
}
- set_bit(WMI_PROBED, &wblock->flags);
+ down_write(&wblock->notify_lock);
+ wblock->driver_ready = true;
+ up_write(&wblock->notify_lock);
return 0;
}
@@ -886,7 +916,9 @@ static void wmi_dev_remove(struct device *dev)
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
- clear_bit(WMI_PROBED, &wblock->flags);
+ down_write(&wblock->notify_lock);
+ wblock->driver_ready = false;
+ up_write(&wblock->notify_lock);
if (wdriver->remove)
wdriver->remove(dev_to_wdev(dev));
@@ -999,6 +1031,8 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.setable = true;
out_init:
+ init_rwsem(&wblock->notify_lock);
+ wblock->driver_ready = false;
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
@@ -1171,6 +1205,26 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
}
}
+static void wmi_notify_driver(struct wmi_block *wblock)
+{
+ struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+ struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ if (!driver->no_notify_data) {
+ status = get_event_data(wblock, &data);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+ return;
+ }
+ }
+
+ if (driver->notify)
+ driver->notify(&wblock->dev, data.pointer);
+
+ kfree(data.pointer);
+}
+
static int wmi_notify_device(struct device *dev, void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
@@ -1179,28 +1233,17 @@ static int wmi_notify_device(struct device *dev, void *data)
if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
return 0;
- /* If a driver is bound, then notify the driver. */
- if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
- struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
- struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_status status;
-
- if (!driver->no_notify_data) {
- status = get_event_data(wblock, &evdata);
- if (ACPI_FAILURE(status)) {
- dev_warn(&wblock->dev.dev, "failed to get event data\n");
- return -EIO;
- }
- }
-
- if (driver->notify)
- driver->notify(&wblock->dev, evdata.pointer);
-
- kfree(evdata.pointer);
- } else if (wblock->handler) {
- /* Legacy handler */
- wblock->handler(*event, wblock->handler_data);
+ down_read(&wblock->notify_lock);
+ /* The WMI driver notify handler conflicts with the legacy WMI handler.
+ * Because of this, the WMI driver notify handler takes precedence.
+ */
+ if (wblock->dev.dev.driver && wblock->driver_ready) {
+ wmi_notify_driver(wblock);
+ } else {
+ if (wblock->handler)
+ wblock->handler(*event, wblock->handler_data);
}
+ up_read(&wblock->notify_lock);
acpi_bus_generate_netlink_event(wblock->acpi_device->pnp.device_class,
dev_name(&wblock->dev.dev), *event, 0);
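A reduced sketch of the locking scheme these hunks introduce, under hypothetical my_* names: registration paths take the rw_semaphore exclusively while the notify path takes it shared, so handler removal cannot race with a handler invocation, yet concurrent events still proceed in parallel.

#include <linux/rwsem.h>

struct my_block {
	struct rw_semaphore notify_lock;
	void (*handler)(void *data);
	void *handler_data;
};

static void my_set_handler(struct my_block *b, void (*fn)(void *), void *data)
{
	down_write(&b->notify_lock);
	b->handler = fn;
	b->handler_data = data;
	up_write(&b->notify_lock);
}

static void my_notify(struct my_block *b)
{
	down_read(&b->notify_lock);
	if (b->handler)
		b->handler(b->handler_data);	/* cannot race with removal */
	up_read(&b->notify_lock);
}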
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index f8221a15575b..a3415f1c0b5f 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include "x86-android-tablets.h"
+#include "../serdev_helpers.h"
static struct platform_device *x86_android_tablet_device;
@@ -113,6 +114,9 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq))
irq_set_irq_type(irq, irq_type);
+ if (data->free_gpio)
+ devm_gpiod_put(&x86_android_tablet_device->dev, gpiod);
+
return irq;
case X86_ACPI_IRQ_TYPE_PMIC:
status = acpi_get_handle(NULL, data->chip, &handle);
@@ -229,38 +233,20 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
{
- struct acpi_device *ctrl_adev, *serdev_adev;
+ struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s ctrl adev\n",
- info->ctrl_hid, info->ctrl_uid);
- return -ENODEV;
- }
+ ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
+ info->ctrl_devname);
+ if (IS_ERR(ctrl_dev))
+ return PTR_ERR(ctrl_dev);
serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
if (!serdev_adev) {
pr_err("error could not get %s serdev adev\n", info->serdev_hid);
- goto put_ctrl_adev;
- }
-
- /* get_first_physical_node() returns a weak ref, no need to put() it */
- ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s ctrl physical dev\n",
- info->ctrl_hid, info->ctrl_uid);
- goto put_serdev_adev;
- }
-
- /* ctrl_dev now points to the controller's parent, get the controller */
- ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s %s ctrl dev\n",
- info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
- goto put_serdev_adev;
+ goto put_ctrl_dev;
}
serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
@@ -283,8 +269,8 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int
put_serdev_adev:
acpi_dev_put(serdev_adev);
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
+put_ctrl_dev:
+ put_device(ctrl_dev);
return ret;
}
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index f1c66a61bfc5..c297391955ad 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -116,6 +116,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst
.trigger = ACPI_EDGE_SENSITIVE,
.polarity = ACPI_ACTIVE_LOW,
.con_id = "goodix_ts_irq",
+ .free_gpio = true,
},
}, {
/* Wacom Digitizer in keyboard half */
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index bc6bbf7ec6ea..278402dcb808 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -68,7 +68,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
.dev_id = "i2c-NVT-ts",
.table = {
GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
@@ -77,7 +77,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
};
static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
- &acer_b1_750_goodix_gpios,
+ &acer_b1_750_nvt_ts_gpios,
&int3496_reference_gpios,
NULL
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 49fed9410adb..468993edfeee 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -39,6 +39,7 @@ struct x86_acpi_irq_data {
int index;
int trigger; /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */
int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
+ bool free_gpio; /* Release GPIO after getting IRQ (for TYPE_GPIOINT) */
const char *con_id;
};
diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c
index 709bbc448fad..d7ef46ccd9b8 100644
--- a/drivers/pmdomain/arm/scmi_perf_domain.c
+++ b/drivers/pmdomain/arm/scmi_perf_domain.c
@@ -159,6 +159,9 @@ static void scmi_perf_domain_remove(struct scmi_device *sdev)
struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev);
int i;
+ if (!scmi_pd_data)
+ return;
+
of_genpd_del_provider(dev->of_node);
for (i = 0; i < scmi_pd_data->num_domains; i++)
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index a1f6cba3ae6c..18e232b5ed53 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -1109,7 +1109,7 @@ static int __init genpd_power_off_unused(void)
return 0;
}
-late_initcall(genpd_power_off_unused);
+late_initcall_sync(genpd_power_off_unused);
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index e26dc17d07ad..e274e3315fe7 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -561,6 +561,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
goto err_put_node;
}
+ /* recursive call to add all subdomains */
+ ret = scpsys_add_subdomain(scpsys, child);
+ if (ret)
+ goto err_put_node;
+
ret = pm_genpd_add_subdomain(parent_pd, child_pd);
if (ret) {
dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
@@ -570,11 +575,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
child_pd->name);
}
-
- /* recursive call to add all subdomains */
- ret = scpsys_add_subdomain(scpsys, child);
- if (ret)
- goto err_put_node;
}
return 0;
@@ -588,9 +588,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
{
int ret;
- if (scpsys_domain_is_on(pd))
- scpsys_power_off(&pd->genpd);
-
/*
* We're in the error cleanup already, so we only complain,
* but won't emit another error on top of the original one.
@@ -600,6 +597,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
dev_err(pd->scpsys->dev,
"failed to remove domain '%s' : %d - state may be inconsistent\n",
pd->genpd.name, ret);
+ if (scpsys_domain_is_on(pd))
+ scpsys_power_off(&pd->genpd);
clk_bulk_put(pd->num_clks, pd->clks);
clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index 3078896b1300..47df910645f6 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -692,6 +692,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+ unsigned int peer_enabled_corner;
if (pd->state_synced) {
to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
@@ -701,9 +702,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
this_sleep_corner = pd->level_count - 1;
}
- if (peer && peer->enabled)
- to_active_sleep(peer, peer->corner, &peer_active_corner,
+ if (peer && peer->enabled) {
+ peer_enabled_corner = max(peer->corner, peer->enable_corner);
+ to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
&peer_sleep_corner);
+ }
active_corner = max(this_active_corner, peer_active_corner);
diff --git a/drivers/pmdomain/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c
index 39ca84a67daa..621e411fc999 100644
--- a/drivers/pmdomain/renesas/r8a77980-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77980-sysc.c
@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
- { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
+ { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON,
+ PD_CPU_NOCR },
{ "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
{ "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
{ "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f21cb05815ec..3e31375491d5 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -978,6 +978,7 @@ config CHARGER_QCOM_SMB2
config FUEL_GAUGE_MM8013
tristate "Mitsumi MM8013 fuel gauge driver"
depends on I2C
+ select REGMAP_I2C
help
Say Y here to enable the Mitsumi MM8013 fuel gauge driver.
It enables the monitoring of many battery parameters, including
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 3a1798b0c1a7..9910c600743e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
- free_irq(client->irq, di);
+ if (client->irq)
+ free_irq(client->irq, di);
+
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index a12e2a66d516..ec163d1bcd18 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -282,7 +282,6 @@ struct qcom_battmgr_wireless {
struct qcom_battmgr {
struct device *dev;
- struct auxiliary_device *adev;
struct pmic_glink_client *client;
enum qcom_battmgr_variant variant;
@@ -1294,69 +1293,11 @@ static void qcom_battmgr_enable_worker(struct work_struct *work)
dev_err(battmgr->dev, "failed to request power notifications\n");
}
-static char *qcom_battmgr_battery[] = { "battery" };
-
-static void qcom_battmgr_register_psy(struct qcom_battmgr *battmgr)
-{
- struct power_supply_config psy_cfg_supply = {};
- struct auxiliary_device *adev = battmgr->adev;
- struct power_supply_config psy_cfg = {};
- struct device *dev = &adev->dev;
-
- psy_cfg.drv_data = battmgr;
- psy_cfg.of_node = adev->dev.of_node;
-
- psy_cfg_supply.drv_data = battmgr;
- psy_cfg_supply.of_node = adev->dev.of_node;
- psy_cfg_supply.supplied_to = qcom_battmgr_battery;
- psy_cfg_supply.num_supplicants = 1;
-
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP) {
- battmgr->bat_psy = devm_power_supply_register(dev, &sc8280xp_bat_psy_desc, &psy_cfg);
- if (IS_ERR(battmgr->bat_psy))
- dev_err(dev, "failed to register battery power supply (%ld)\n",
- PTR_ERR(battmgr->bat_psy));
-
- battmgr->ac_psy = devm_power_supply_register(dev, &sc8280xp_ac_psy_desc, &psy_cfg_supply);
- if (IS_ERR(battmgr->ac_psy))
- dev_err(dev, "failed to register AC power supply (%ld)\n",
- PTR_ERR(battmgr->ac_psy));
-
- battmgr->usb_psy = devm_power_supply_register(dev, &sc8280xp_usb_psy_desc, &psy_cfg_supply);
- if (IS_ERR(battmgr->usb_psy))
- dev_err(dev, "failed to register USB power supply (%ld)\n",
- PTR_ERR(battmgr->usb_psy));
-
- battmgr->wls_psy = devm_power_supply_register(dev, &sc8280xp_wls_psy_desc, &psy_cfg_supply);
- if (IS_ERR(battmgr->wls_psy))
- dev_err(dev, "failed to register wireless charing power supply (%ld)\n",
- PTR_ERR(battmgr->wls_psy));
- } else {
- battmgr->bat_psy = devm_power_supply_register(dev, &sm8350_bat_psy_desc, &psy_cfg);
- if (IS_ERR(battmgr->bat_psy))
- dev_err(dev, "failed to register battery power supply (%ld)\n",
- PTR_ERR(battmgr->bat_psy));
-
- battmgr->usb_psy = devm_power_supply_register(dev, &sm8350_usb_psy_desc, &psy_cfg_supply);
- if (IS_ERR(battmgr->usb_psy))
- dev_err(dev, "failed to register USB power supply (%ld)\n",
- PTR_ERR(battmgr->usb_psy));
-
- battmgr->wls_psy = devm_power_supply_register(dev, &sm8350_wls_psy_desc, &psy_cfg_supply);
- if (IS_ERR(battmgr->wls_psy))
- dev_err(dev, "failed to register wireless charing power supply (%ld)\n",
- PTR_ERR(battmgr->wls_psy));
- }
-}
-
static void qcom_battmgr_pdr_notify(void *priv, int state)
{
struct qcom_battmgr *battmgr = priv;
if (state == SERVREG_SERVICE_STATE_UP) {
- if (!battmgr->bat_psy)
- qcom_battmgr_register_psy(battmgr);
-
battmgr->service_up = true;
schedule_work(&battmgr->enable_work);
} else {
@@ -1371,9 +1312,13 @@ static const struct of_device_id qcom_battmgr_of_variants[] = {
{}
};
+static char *qcom_battmgr_battery[] = { "battery" };
+
static int qcom_battmgr_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
+ struct power_supply_config psy_cfg_supply = {};
+ struct power_supply_config psy_cfg = {};
const struct of_device_id *match;
struct qcom_battmgr *battmgr;
struct device *dev = &adev->dev;
@@ -1383,7 +1328,14 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
return -ENOMEM;
battmgr->dev = dev;
- battmgr->adev = adev;
+
+ psy_cfg.drv_data = battmgr;
+ psy_cfg.of_node = adev->dev.of_node;
+
+ psy_cfg_supply.drv_data = battmgr;
+ psy_cfg_supply.of_node = adev->dev.of_node;
+ psy_cfg_supply.supplied_to = qcom_battmgr_battery;
+ psy_cfg_supply.num_supplicants = 1;
INIT_WORK(&battmgr->enable_work, qcom_battmgr_enable_worker);
mutex_init(&battmgr->lock);
@@ -1395,6 +1347,43 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
else
battmgr->variant = QCOM_BATTMGR_SM8350;
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP) {
+ battmgr->bat_psy = devm_power_supply_register(dev, &sc8280xp_bat_psy_desc, &psy_cfg);
+ if (IS_ERR(battmgr->bat_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
+ "failed to register battery power supply\n");
+
+ battmgr->ac_psy = devm_power_supply_register(dev, &sc8280xp_ac_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->ac_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->ac_psy),
+ "failed to register AC power supply\n");
+
+ battmgr->usb_psy = devm_power_supply_register(dev, &sc8280xp_usb_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->usb_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->usb_psy),
+ "failed to register USB power supply\n");
+
+ battmgr->wls_psy = devm_power_supply_register(dev, &sc8280xp_wls_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->wls_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy),
+ "failed to register wireless charing power supply\n");
+ } else {
+ battmgr->bat_psy = devm_power_supply_register(dev, &sm8350_bat_psy_desc, &psy_cfg);
+ if (IS_ERR(battmgr->bat_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
+ "failed to register battery power supply\n");
+
+ battmgr->usb_psy = devm_power_supply_register(dev, &sm8350_usb_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->usb_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->usb_psy),
+ "failed to register USB power supply\n");
+
+ battmgr->wls_psy = devm_power_supply_register(dev, &sm8350_wls_psy_desc, &psy_cfg_supply);
+ if (IS_ERR(battmgr->wls_psy))
+ return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy),
+ "failed to register wireless charing power supply\n");
+ }
+
battmgr->client = devm_pmic_glink_register_client(dev,
PMIC_GLINK_OWNER_BATTMGR,
qcom_battmgr_callback,
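
Moving the power-supply registration from the PDR notifier into probe() has two visible effects: registration failures now abort probing via dev_err_probe() instead of being merely logged, and the devm_ lifetime is tied to the auxiliary device rather than re-checked on every service-up event. A minimal sketch of the error idiom (desc and psy_cfg stand in for the driver's descriptors):

/*
 * Sketch: dev_err_probe() logs the failure (recording -EPROBE_DEFER
 * quietly instead of printing it) and returns the error, so one
 * statement replaces the usual log-then-return pair.
 */
psy = devm_power_supply_register(dev, &desc, &psy_cfg);
if (IS_ERR(psy))
        return dev_err_probe(dev, PTR_ERR(psy),
                             "failed to register power supply\n");
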
diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
index bc88a40a88d4..8bbcd983a74a 100644
--- a/drivers/regulator/max5970-regulator.c
+++ b/drivers/regulator/max5970-regulator.c
@@ -29,8 +29,8 @@ struct max5970_regulator {
};
enum max597x_regulator_id {
- MAX597X_SW0,
- MAX597X_SW1,
+ MAX597X_sw0,
+ MAX597X_sw1,
};
static int max5970_read_adc(struct regmap *regmap, int reg, long *val)
@@ -378,8 +378,8 @@ static int max597x_dt_parse(struct device_node *np,
}
static const struct regulator_desc regulators[] = {
- MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
- MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
+ MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"),
+ MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"),
};
static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
@@ -392,7 +392,7 @@ static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
return ret;
if (*val)
- return regmap_write(map, reg, *val);
+ return regmap_write(map, reg, 0);
return 0;
}
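
Writing back the value that was just read assumes write-one-to-clear semantics; the fix instead writes 0, treating the register as one that is cleared by writing zero, so echoing the bits would have kept the latched status asserted. A sketch of the corrected helper shape:

/* Sketch: read a latched status register, then clear it. */
static int example_read_clear(struct regmap *map, unsigned int reg,
                              unsigned int *val)
{
        int ret = regmap_read(map, reg, val);

        if (ret)
                return ret;

        if (*val)                       /* something latched - clear it */
                return regmap_write(map, reg, 0);

        return 0;
}
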
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 698c420e0869..60cfcd741c2a 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -157,7 +157,17 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
pwm_get_state(drvdata->pwm, &pstate);
+ if (!pstate.enabled) {
+ if (pstate.polarity == PWM_POLARITY_INVERSED)
+ pstate.duty_cycle = pstate.period;
+ else
+ pstate.duty_cycle = 0;
+ }
+
voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+ if (voltage < min(max_uV_duty, min_uV_duty) ||
+ voltage > max(max_uV_duty, min_uV_duty))
+ return -ENOTRECOVERABLE;
/*
* The dutycycle for min_uV might be greater than the one for max_uV.
@@ -313,6 +323,32 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev,
return 0;
}
+static int pwm_regulator_init_boot_on(struct platform_device *pdev,
+ struct pwm_regulator_data *drvdata,
+ const struct regulator_init_data *init_data)
+{
+ struct pwm_state pstate;
+
+ if (!init_data->constraints.boot_on || drvdata->enb_gpio)
+ return 0;
+
+ pwm_get_state(drvdata->pwm, &pstate);
+ if (pstate.enabled)
+ return 0;
+
+ /*
+ * Update the duty cycle so the output does not change
+ * when the regulator core enables the regulator (and
+ * thus the PWM channel).
+ */
+ if (pstate.polarity == PWM_POLARITY_INVERSED)
+ pstate.duty_cycle = pstate.period;
+ else
+ pstate.duty_cycle = 0;
+
+ return pwm_apply_might_sleep(drvdata->pwm, &pstate);
+}
+
static int pwm_regulator_probe(struct platform_device *pdev)
{
const struct regulator_init_data *init_data;
@@ -372,6 +408,13 @@ static int pwm_regulator_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n",
+ ret);
+ return ret;
+ }
+
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
if (IS_ERR(regulator)) {
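
Two related problems are addressed above. First, a disabled PWM reports a stale duty cycle, so get_voltage() now normalizes it to what the pin actually drives (a disabled inversed output behaves like 100% duty) and returns -ENOTRECOVERABLE for readings outside the regulator's range. Second, for boot_on regulators the duty cycle is pre-loaded before the core's first enable so the output does not glitch. A sketch of the normalization, following the driver:

/* Sketch: translate "PWM disabled" into the equivalent duty cycle. */
pwm_get_state(drvdata->pwm, &pstate);
if (!pstate.enabled)
        pstate.duty_cycle = (pstate.polarity == PWM_POLARITY_INVERSED)
                                ? pstate.period : 0;
voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
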
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index f48214e2c3b4..04133510e5af 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -726,9 +726,25 @@ static int ti_abb_probe(struct platform_device *pdev)
return PTR_ERR(abb->setup_reg);
}
- abb->int_base = devm_platform_ioremap_resource_byname(pdev, "int-address");
- if (IS_ERR(abb->int_base))
- return PTR_ERR(abb->int_base);
+ pname = "int-address";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+ if (!res) {
+ dev_err(dev, "Missing '%s' IO resource\n", pname);
+ return -ENODEV;
+ }
+ /*
+ * The MPU interrupt status register (PRM_IRQSTATUS_MPU) is
+ * shared between regulator-abb-{ivahd,dspeve,gpu} driver
+ * instances. Therefore use devm_ioremap() rather than
+ * devm_platform_ioremap_resource_byname() to avoid busy
+ * resource region conflicts.
+ */
+ abb->int_base = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!abb->int_base) {
+ dev_err(dev, "Unable to map '%s'\n", pname);
+ return -ENOMEM;
+ }
/* Map Optional resources */
pname = "efuse-address";
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c533d1dadc6b..a5dba3829769 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY ||
+ cdev->private->flags.doverify) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
}
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
+ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
return -EBUSY;
ret = cio_set_options (sch, flags);
if (ret)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b92a32b4b114..04c64ce0a1ca 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
if (!recover) {
hash_del(&addr->hnode);
kfree(addr);
- continue;
+ } else {
+ /* prepare for recovery */
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- addr->disp_flag = QETH_DISP_ADDR_ADD;
}
mutex_unlock(&card->ip_lock);
@@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc) {
+ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
+ /* keep it in the records */
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
+ /* bad address */
hash_del(&addr->hnode);
kfree(addr);
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index addac7fbe37b..9ce27092729c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1270,7 +1270,7 @@ source "drivers/scsi/arm/Kconfig"
config JAZZ_ESP
bool "MIPS JAZZ FAS216 SCSI support"
- depends on MACH_JAZZ && SCSI
+ depends on MACH_JAZZ && SCSI=y
select SCSI_SPI_ATTRS
help
This is the driver for the onboard SCSI host adapter of MIPS Magnum
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 19eee108db02..5c8d1ba3f8f3 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
- unsigned long flags;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
- unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
int error;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
}
/**
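
The lock conversion above (irqsave back to _bh) reflects that ctlr_lock is only taken from process context and softirq-level timer/work callbacks, never from hard interrupt context; disabling bottom halves is therefore sufficient and the flags plumbing can go. A sketch of the rule of thumb, assuming those calling contexts:

/*
 * Sketch: pick the lightest lock variant the calling contexts allow.
 * Process + softirq users  ->  spin_lock_bh()
 * Any hard-IRQ user        ->  spin_lock_irqsave()
 */
spin_lock_bh(&fip->ctlr_lock);
/* ... flogi_req / flogi_sent bookkeeping ... */
spin_unlock_bh(&fip->ctlr_lock);
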
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 2074937c05bc..ce73f08ee889 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -305,6 +305,7 @@ struct fnic {
unsigned int copy_wq_base;
struct work_struct link_work;
struct work_struct frame_work;
+ struct work_struct flush_work;
struct sk_buff_head frame_queue;
struct sk_buff_head tx_queue;
@@ -363,7 +364,7 @@ void fnic_handle_event(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
-void fnic_flush_tx(struct fnic *);
+void fnic_flush_tx(struct work_struct *work);
void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
void fnic_update_mac(struct fc_lport *, u8 *new);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 5e312a55cc7d..a08293b2ad9f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -1182,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
/**
* fnic_flush_tx() - send queued frames.
- * @fnic: fnic device
+ * @work: pointer to work element
*
* Send frames that were waiting to go out in FC or Ethernet mode.
* Whenever changing modes we purge queued frames, so these frames should
@@ -1190,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
*
* Called without fnic_lock held.
*/
-void fnic_flush_tx(struct fnic *fnic)
+void fnic_flush_tx(struct work_struct *work)
{
+ struct fnic *fnic = container_of(work, struct fnic, flush_work);
struct sk_buff *skb;
struct fc_frame *fp;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5ed1d897311a..29eead383eb9 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -830,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
+ INIT_WORK(&fnic->flush_work, fnic_flush_tx);
skb_queue_head_init(&fnic->fip_frame_queue);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8d7fc5284293..fc4cee91b175 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -680,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
reset_cmpl_handler_end:
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
@@ -736,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
queue_work(fnic_event_queue, &fnic->frame_work);
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
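
Turning fnic_flush_tx() into a work item lets the FW-reset and FLOGI-register completion paths defer frame transmission to fnic_event_queue instead of doing it inline; the handler recovers its fnic with the usual container_of() embedding. A minimal sketch of that pattern (example_dev and system_wq stand in for the driver's structure and queue):

/* Sketch: embed the work item, recover the parent in the handler. */
struct example_dev {
        struct work_struct flush_work;
        /* ... queues, locks ... */
};

static void example_flush(struct work_struct *work)
{
        struct example_dev *ed = container_of(work, struct example_dev,
                                              flush_work);
        /* ... drain ed's tx queue ... */
}

/* at init time */
INIT_WORK(&ed->flush_work, example_flush);

/* at the former call site */
queue_work(system_wq, &ed->flush_work);
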
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 2a50fda3a628..625fd547ee60 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -371,7 +371,6 @@ static u16 initio_se2_rd(unsigned long base, u8 addr)
*/
static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
{
- u8 rb;
u8 instr;
int i;
@@ -400,7 +399,7 @@ static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
udelay(30);
outb(SE2CS, base + TUL_NVRAM); /* -CLK */
udelay(30);
- if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
+ if (inb(base + TUL_NVRAM) & SE2DI)
break; /* write complete */
}
outb(0, base + TUL_NVRAM); /* -CS */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 71f711cb0628..355a0bc0828e 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -3387,7 +3387,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
return SCI_FAILURE;
}
- return SCI_SUCCESS;
+ return status;
}
static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d26941b131fd..bf879d81846b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1918,7 +1918,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datasegcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t physaddr;
- int i = 0, num_sge = 0, status;
- uint32_t reftag;
+ int i = 0, status;
+ uint32_t reftag, num_sge = 0;
uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
@@ -2099,7 +2099,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datacnt, int protcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t rc;
#endif
uint32_t checking = 1;
- uint32_t dma_offset = 0;
- int num_sge = 0, j = 2;
+ uint32_t dma_offset = 0, num_sge = 0;
+ int j = 2;
struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index c0c8ab586957..d32ad46318cb 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1671,7 +1671,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
void
mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
{
- struct host_port h_port[64];
+ struct host_port *h_port = NULL;
int i, j, found, host_port_count = 0, port_idx;
u16 sz, attached_handle, ioc_status;
struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
@@ -1685,6 +1685,10 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_io_unit_pg0)
return;
+ h_port = kcalloc(64, sizeof(struct host_port), GFP_KERNEL);
+ if (!h_port)
+ goto out;
+
if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -1814,6 +1818,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
}
}
out:
+ kfree(h_port);
kfree(sas_io_unit_pg0);
}
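
struct host_port is large enough that a 64-element on-stack array blew past the kernel's per-function stack budget (the -Wframe-larger-than warn limit, commonly 1024 bytes); the fix moves the scratch array to the heap and frees it on the shared exit path. A sketch of the shape:

/* Sketch: large scratch buffers go on the heap, freed on one path. */
struct host_port *h_port;

h_port = kcalloc(64, sizeof(*h_port), GFP_KERNEL);
if (!h_port)
        goto out;
/* ... fill and consume h_port[0..63] ... */
out:
        kfree(h_port);          /* kfree(NULL) is a harmless no-op */
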
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8761bc58d965..b8120ca93c79 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -7378,7 +7378,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
return -EFAULT;
}
- issue_diag_reset:
+ return 0;
+
+issue_diag_reset:
rc = _base_diag_reset(ioc);
return rc;
}
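
Before the fix, the success path fell straight through into the issue_diag_reset label, so a healthy IOC was diag-reset anyway; the added return 0 makes the label reachable only via an explicit goto. A sketch of the anti-pattern and its cure (the condition name is hypothetical):

        if (ioc_state_ok)               /* hypothetical condition */
                return 0;               /* without this, success falls
                                         * into the reset path below */

issue_diag_reset:
        rc = _base_diag_reset(ioc);
        return rc;
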
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 76d369343c7a..8cad9792a562 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return result + 4;
}
+enum scsi_vpd_parameters {
+ SCSI_VPD_HEADER_SIZE = 4,
+ SCSI_VPD_LIST_SIZE = 36,
+};
+
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
- unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
int result;
if (sdev->no_vpd_size)
return SCSI_DEFAULT_VPD_LEN;
/*
+ * Fetch the supported pages VPD and validate that the requested page
+ * number is present.
+ */
+ if (page != 0) {
+ result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
+ if (result < SCSI_VPD_HEADER_SIZE)
+ return 0;
+
+ result -= SCSI_VPD_HEADER_SIZE;
+ if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+ return 0;
+ }
+ /*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
* which can not handle allocation lengths as large as
* potentially requested by the caller.
*/
- result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
if (result < 0)
return 0;
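
Requesting an unsupported VPD page can stall or confuse fragile devices, so the lookup above first fetches page 0 - the "Supported VPD pages" list, whose payload is one page number per byte after a 4-byte header - and gives up early when the wanted page is not advertised. A sketch of the check:

/* Sketch: consult VPD page 0 before asking for any other page. */
result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
if (result < SCSI_VPD_HEADER_SIZE)
        return 0;

if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page,
            result - SCSI_VPD_HEADER_SIZE))
        return 0;       /* page not listed - do not query it */
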
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 79da4b1c1df0..612489afe8d2 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
struct scsi_cmnd *);
-void scsi_eh_wakeup(struct Scsi_Host *shost)
+void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
{
lockdep_assert_held(shost->host_lock);
- if (scsi_host_busy(shost) == shost->host_failed) {
+ if (busy == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
shost->host_eh_scheduled++;
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, scsi_host_busy(shost));
}
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
{
struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
struct Scsi_Host *shost = scmd->device->host;
+ unsigned int busy = scsi_host_busy(shost);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
shost->host_failed++;
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
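
scsi_host_busy() walks the whole tag set, which is too heavy (and lock-ordering hostile) to run under host_lock; the reworked scsi_eh_wakeup() therefore takes a busy count that the caller sampled before locking. A sketch of the call-site pattern:

/* Sketch: snapshot the expensive count first, lock second. */
unsigned int busy = scsi_host_busy(shost);      /* iterates tags */
unsigned long flags;

spin_lock_irqsave(shost->host_lock, flags);
shost->host_failed++;
scsi_eh_wakeup(shost, busy);                    /* cheap compare */
spin_unlock_irqrestore(shost->host_lock, flags);
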
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cf3864f72093..df5ac03d5d6c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
rcu_read_lock();
__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
+ unsigned int busy = scsi_host_busy(shost);
+
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
- scsi_eh_wakeup(shost);
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
rcu_read_unlock();
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 3f0dfb97db6b..1fbfe1b52c9f 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
extern enum blk_eh_timer_return scsi_timeout(struct request *req);
extern int scsi_error_handler(void *host);
extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
+extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
extern void scsi_eh_scmd_add(struct scsi_cmnd *);
void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *work_q,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0833b3e6aa6e..bdd0acf7fa3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3407,6 +3407,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return true;
}
+static void sd_read_block_zero(struct scsi_disk *sdkp)
+{
+ unsigned int buf_len = sdkp->device->sector_size;
+ char *buffer, cmd[10] = { };
+
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+
+ scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+ SD_TIMEOUT, sdkp->max_retries, NULL);
+ kfree(buffer);
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3446,7 +3464,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
-
+ /*
+ * Some USB/UAS devices return generic values for mode pages
+ * until the media has been accessed. Trigger a READ operation
+ * to force the device to populate mode pages.
+ */
+ if (sdp->read_before_ms)
+ sd_read_block_zero(sdkp);
/*
* set the default to rotational. All non-rotational devices
* support the block characteristics VPD page, which will
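
The read_before_ms quirk handled above exists because some USB/UAS bridges answer MODE SENSE with canned defaults until the medium has been read at least once; a throwaway READ(10) of LBA 0 forces them to populate real mode pages before sd probes the cache type. A sketch of the CDB construction, following the new helper:

/* Sketch: one-block READ(10) at LBA 0, result discarded. */
unsigned char cmd[10] = { READ_10 };

put_unaligned_be32(0, &cmd[2]);         /* logical block address */
put_unaligned_be16(1, &cmd[7]);         /* transfer length: 1 block */

scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer,
                 sdkp->device->sector_size, SD_TIMEOUT,
                 sdkp->max_retries, NULL);
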
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ceff1ec13f9e..385180c98be4 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -6533,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ if (!ctrl_info->disable_managed_interrupts)
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ctrl_info->pci_dev, 0);
+ else
+ return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a95936b18f69..7ceb982040a5 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -330,6 +330,7 @@ enum storvsc_request_type {
*/
static int storvsc_ringbuffer_size = (128 * 1024);
+static int aligned_ringbuffer_size;
static u32 max_outstanding_req_per_channel;
static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
@@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
new_sc->next_request_id_callback = storvsc_next_request_id;
ret = vmbus_open(new_sc,
- storvsc_ringbuffer_size,
- storvsc_ringbuffer_size,
+ aligned_ringbuffer_size,
+ aligned_ringbuffer_size,
(void *)&props,
sizeof(struct vmstorage_channel_properties),
storvsc_on_channel_callback, new_sc);
@@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device,
dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
stor_device->port_number = host->host_no;
- ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
+ ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
if (ret)
goto err_out1;
@@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
{
int ret;
- ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+ ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
hv_dev_is_fc(hv_dev));
return ret;
}
@@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void)
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
*/
+ aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
max_outstanding_req_per_channel =
- ((storvsc_ringbuffer_size - PAGE_SIZE) /
+ ((aligned_ringbuffer_size - PAGE_SIZE) /
ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
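
vmbus_open() works with ring sizes rounded up by VMBUS_RING_SIZE() (header plus page alignment); computing that aligned value once and deriving both the channel opens and max_outstanding_req_per_channel from it keeps the request accounting consistent with the memory actually allocated. A sketch (per_request_size stands in for the driver's ALIGN(...) expression):

/* Sketch: derive every limit from the same aligned ring size. */
aligned = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
max_outstanding = (aligned - PAGE_SIZE) / per_request_size;
ret = vmbus_open(channel, aligned, aligned, &props, sizeof(props),
                 callback, channel);
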
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 4cf20be668a6..617eb892f4ad 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -188,8 +188,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
fn(vscsi, buf);
- if (unlikely(virtqueue_is_broken(vq)))
- break;
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
index 780199bf351e..49a0955e82d6 100644
--- a/drivers/soc/apple/mailbox.c
+++ b/drivers/soc/apple/mailbox.c
@@ -296,14 +296,14 @@ struct apple_mbox *apple_mbox_get(struct device *dev, int index)
of_node_put(args.np);
if (!pdev)
- return ERR_PTR(EPROBE_DEFER);
+ return ERR_PTR(-EPROBE_DEFER);
mbox = platform_get_drvdata(pdev);
if (!mbox)
- return ERR_PTR(EPROBE_DEFER);
+ return ERR_PTR(-EPROBE_DEFER);
if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER))
- return ERR_PTR(ENODEV);
+ return ERR_PTR(-ENODEV);
return mbox;
}
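
ERR_PTR() expects a negative errno: IS_ERR() only recognizes the last 4095 pointer values, i.e. negative errnos cast to a pointer. ERR_PTR(EPROBE_DEFER) yields a small positive "address" that sails past IS_ERR(), so callers would dereference it instead of deferring. A sketch:

/* Sketch: the sign is the whole bug. */
void *ok  = ERR_PTR(-EPROBE_DEFER);     /* IS_ERR(ok)  == true  */
void *bad = ERR_PTR(EPROBE_DEFER);      /* IS_ERR(bad) == false -
                                         * looks like a valid pointer */
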
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index 9b0fdd95276e..19f4b576f822 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,5 +1,5 @@
config POLARFIRE_SOC_SYS_CTRL
- tristate "POLARFIRE_SOC_SYS_CTRL"
+ tristate "Microchip PolarFire SoC (MPFS) system controller support"
depends on POLARFIRE_SOC_MAILBOX
depends on MTD
help
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index f4bfd24386f1..f913e9bd57ed 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -265,10 +265,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->client_mask = *match_data;
+ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+ if (IS_ERR(pg->pdr)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
+ "failed to initialize pdr\n");
+ return ret;
+ }
+
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
if (ret)
- return ret;
+ goto out_release_pdr_handle;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@@ -281,17 +288,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
goto out_release_altmode_aux;
}
- pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
- if (IS_ERR(pg->pdr)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
- goto out_release_aux_devices;
- }
-
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
if (IS_ERR(service)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
"failed adding pdr lookup for charger_pd\n");
- goto out_release_pdr_handle;
+ goto out_release_aux_devices;
}
mutex_lock(&__pmic_glink_lock);
@@ -300,8 +301,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
return 0;
-out_release_pdr_handle:
- pdr_handle_release(pg->pdr);
out_release_aux_devices:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
pmic_glink_del_aux_device(pg, &pg->ps_aux);
@@ -311,6 +310,8 @@ out_release_altmode_aux:
out_release_ucsi_aux:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+out_release_pdr_handle:
+ pdr_handle_release(pg->pdr);
return ret;
}
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 5fcd0fdd2faa..b3808fc24c69 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct device *bridge;
+ struct auxiliary_device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,7 +230,7 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
- drm_aux_hpd_bridge_notify(alt_port->bridge,
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
alt_port->hpd_state ?
connector_status_connected :
connector_status_disconnected);
@@ -454,7 +454,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+ alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
if (IS_ERR(alt_port->bridge)) {
fwnode_handle_put(fwnode);
return PTR_ERR(alt_port->bridge);
@@ -510,6 +510,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
}
}
+ for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
+ alt_port = &altmode->ports[port];
+ if (!alt_port->bridge)
+ continue;
+
+ ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
+ if (ret)
+ return ret;
+ }
+
altmode->client = devm_pmic_glink_register_client(dev,
altmode->owner_id,
pmic_glink_altmode_callback,
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index d96222e6d7d2..cfdaa5eaec76 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include "spi-bcm-qspi.h"
@@ -1221,7 +1221,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
/* non-aligned and very short transfers are handled by MSPI */
if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
- len < 4)
+ len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
mspi_read = true;
if (!has_bspi(qspi) || mspi_read)
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index f94e0d370d46..1a8d03958dff 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1927,24 +1927,18 @@ static void cqspi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int cqspi_suspend(struct device *dev)
+static int cqspi_runtime_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
- int ret;
- ret = spi_controller_suspend(host);
cqspi_controller_enable(cqspi, 0);
-
clk_disable_unprepare(cqspi->clk);
-
- return ret;
+ return 0;
}
-static int cqspi_resume(struct device *dev)
+static int cqspi_runtime_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
@@ -1952,12 +1946,27 @@ static int cqspi_resume(struct device *dev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
+ return 0;
+}
+
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(cqspi->host);
+}
- return spi_controller_resume(host);
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_resume(cqspi->host);
}
-static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend,
- cqspi_resume, NULL);
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+ RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
+};
static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
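
DEFINE_RUNTIME_DEV_PM_OPS() had wired the same pair of callbacks into both runtime PM and system sleep, so a system suspend gated the controller clock out from under the SPI core. The replacement splits responsibilities: runtime callbacks manage clock and controller state, system-sleep callbacks go through spi_controller_suspend()/resume(). A sketch of the combined ops table (callback names hypothetical):

/* Sketch: independent runtime and system-sleep callbacks. */
static const struct dev_pm_ops example_pm_ops = {
        RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
        SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};
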
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index a50eb4db79de..e5140532071d 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -317,6 +317,15 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
xspi->rx_bytes -= nrx;
while (ntx || nrx) {
+ if (nrx) {
+ u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+ if (xspi->rxbuf)
+ *xspi->rxbuf++ = data;
+
+ nrx--;
+ }
+
if (ntx) {
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
@@ -326,14 +335,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
ntx--;
}
- if (nrx) {
- u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
-
- if (xspi->rxbuf)
- *xspi->rxbuf++ = data;
-
- nrx--;
- }
}
}
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index f13073e12593..b24190526ce9 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -244,7 +244,10 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
priv->ctlr->use_gpio_descriptors = true;
priv->ctlr->auto_runtime_pm = true;
- devm_pm_runtime_enable(priv->dev);
+ ret = devm_pm_runtime_enable(priv->dev);
+ if (ret)
+ return ret;
+
pm_runtime_idle(priv->dev);
regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index 9d22018f7985..1301d14483d4 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
{
struct hisi_sfc_v3xx_host *host = data;
+ u32 reg;
+
+ reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
+ if (!reg)
+ return IRQ_NONE;
hisi_sfc_v3xx_disable_int(host);
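
On a shared interrupt line a handler must return IRQ_NONE when its device did not assert the line, otherwise the spurious-IRQ detector may disable the interrupt for everyone; reading INT_STAT first, as above, provides that discrimination. A sketch of the shape:

/* Sketch: a well-behaved handler on a (potentially) shared line. */
static irqreturn_t example_isr(int irq, void *data)
{
        struct hisi_sfc_v3xx_host *host = data;
        u32 stat = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);

        if (!stat)
                return IRQ_NONE;        /* someone else's interrupt */

        /* ... acknowledge and handle stat ... */
        return IRQ_HANDLED;
}
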
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 272bc871a848..833a1bb7a914 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -2,6 +2,7 @@
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -660,15 +661,15 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
if (spi_imx->usedma) {
- ctrl |= (spi_imx->bits_per_word *
- spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1)
+ ctrl |= (spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
} else {
if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1)
+ ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+ ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+ BITS_PER_BYTE) * spi_imx->bits_per_word
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
}
@@ -1344,7 +1345,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
controller->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(controller->dma_tx)) {
ret = PTR_ERR(controller->dma_tx);
- dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+ dev_err_probe(dev, ret, "can't get the TX DMA channel!\n");
controller->dma_tx = NULL;
goto err;
}
@@ -1353,7 +1354,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
controller->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(controller->dma_rx)) {
ret = PTR_ERR(controller->dma_rx);
- dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+ dev_err_probe(dev, ret, "can't get the RX DMA channel!\n");
controller->dma_rx = NULL;
goto err;
}
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 57d767a68e7b..4337ca51d7aa 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -76,6 +76,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7f24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
@@ -84,7 +85,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xae23), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 1bf080339b5a..88cbe4f00cc3 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -39,6 +39,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
#include <trace/events/spi.h>
+#include <linux/dma/mxs-dma.h>
#define DRIVER_NAME "mxs-spi"
@@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
if (!desc) {
dev_err(ssp->dev,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index a0c9fea908f5..ddf1c684bcc7 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -53,8 +53,6 @@
/* per-register bitmasks: */
#define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
-#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY BIT(0)
-#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL BIT(2)
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
@@ -293,7 +291,7 @@ static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
}
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
- struct spi_transfer *t, int enable, int dma_enabled)
+ struct spi_transfer *t, int enable)
{
struct spi_controller *ctlr = spi->controller;
struct omap2_mcspi_cs *cs = spi->controller_state;
@@ -314,28 +312,20 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
else
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
- if (dma_enabled)
- wcnt = t->len / bytes_per_word;
- else
- wcnt = 0;
+
+ wcnt = t->len / bytes_per_word;
if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
goto disable_fifo;
xferlevel = wcnt << 16;
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
- if (dma_enabled)
- xferlevel |= (bytes_per_word - 1) << 8;
- else
- xferlevel |= (max_fifo_depth - 1) << 8;
+ xferlevel |= (bytes_per_word - 1) << 8;
}
if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
- if (dma_enabled)
- xferlevel |= bytes_per_word - 1;
- else
- xferlevel |= (max_fifo_depth - 1);
+ xferlevel |= bytes_per_word - 1;
}
mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
@@ -892,113 +882,6 @@ out:
return count - c;
}
-static unsigned
-omap2_mcspi_txrx_piofifo(struct spi_device *spi, struct spi_transfer *xfer)
-{
- struct omap2_mcspi_cs *cs = spi->controller_state;
- struct omap2_mcspi *mcspi;
- unsigned int count, c;
- unsigned int iter, cwc;
- int last_request;
- void __iomem *base = cs->base;
- void __iomem *tx_reg;
- void __iomem *rx_reg;
- void __iomem *chstat_reg;
- void __iomem *irqstat_reg;
- int word_len, bytes_per_word;
- u8 *rx;
- const u8 *tx;
-
- mcspi = spi_controller_get_devdata(spi->controller);
- count = xfer->len;
- c = count;
- word_len = cs->word_len;
- bytes_per_word = mcspi_bytes_per_word(word_len);
-
- /*
- * We store the pre-calculated register addresses on stack to speed
- * up the transfer loop.
- */
- tx_reg = base + OMAP2_MCSPI_TX0;
- rx_reg = base + OMAP2_MCSPI_RX0;
- chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
- irqstat_reg = base + OMAP2_MCSPI_IRQSTATUS;
-
- if (c < (word_len >> 3))
- return 0;
-
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
-
- do {
- /* calculate number of words in current iteration */
- cwc = min((unsigned int)mcspi->fifo_depth / bytes_per_word,
- c / bytes_per_word);
- last_request = cwc != (mcspi->fifo_depth / bytes_per_word);
- if (tx) {
- if (mcspi_wait_for_reg_bit(irqstat_reg,
- OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) < 0) {
- dev_err(&spi->dev, "TX Empty timed out\n");
- goto out;
- }
- writel_relaxed(OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY, irqstat_reg);
-
- for (iter = 0; iter < cwc; iter++, tx += bytes_per_word) {
- if (bytes_per_word == 1)
- writel_relaxed(*tx, tx_reg);
- else if (bytes_per_word == 2)
- writel_relaxed(*((u16 *)tx), tx_reg);
- else if (bytes_per_word == 4)
- writel_relaxed(*((u32 *)tx), tx_reg);
- }
- }
-
- if (rx) {
- if (!last_request &&
- mcspi_wait_for_reg_bit(irqstat_reg,
- OMAP2_MCSPI_IRQSTATUS_RX0_FULL) < 0) {
- dev_err(&spi->dev, "RX_FULL timed out\n");
- goto out;
- }
- writel_relaxed(OMAP2_MCSPI_IRQSTATUS_RX0_FULL, irqstat_reg);
-
- for (iter = 0; iter < cwc; iter++, rx += bytes_per_word) {
- if (last_request &&
- mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_RXS) < 0) {
- dev_err(&spi->dev, "RXS timed out\n");
- goto out;
- }
- if (bytes_per_word == 1)
- *rx = readl_relaxed(rx_reg);
- else if (bytes_per_word == 2)
- *((u16 *)rx) = readl_relaxed(rx_reg);
- else if (bytes_per_word == 4)
- *((u32 *)rx) = readl_relaxed(rx_reg);
- }
- }
-
- if (last_request) {
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_EOT) < 0) {
- dev_err(&spi->dev, "EOT timed out\n");
- goto out;
- }
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXFFE) < 0) {
- dev_err(&spi->dev, "TXFFE timed out\n");
- goto out;
- }
- omap2_mcspi_set_enable(spi, 0);
- }
- c -= cwc * bytes_per_word;
- } while (c >= bytes_per_word);
-
-out:
- omap2_mcspi_set_enable(spi, 1);
- return count - c;
-}
-
static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
{
u32 div;
@@ -1323,9 +1206,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
ctlr->cur_msg_mapped &&
ctlr->can_dma(ctlr, spi, t))
- omap2_mcspi_set_fifo(spi, t, 1, 1);
- else if (t->len > OMAP2_MCSPI_MAX_FIFODEPTH)
- omap2_mcspi_set_fifo(spi, t, 1, 0);
+ omap2_mcspi_set_fifo(spi, t, 1);
omap2_mcspi_set_enable(spi, 1);
@@ -1338,8 +1219,6 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
ctlr->cur_msg_mapped &&
ctlr->can_dma(ctlr, spi, t))
count = omap2_mcspi_txrx_dma(spi, t);
- else if (mcspi->fifo_depth > 0)
- count = omap2_mcspi_txrx_piofifo(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
@@ -1352,7 +1231,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
omap2_mcspi_set_enable(spi, 0);
if (mcspi->fifo_depth > 0)
- omap2_mcspi_set_fifo(spi, t, 0, 0);
+ omap2_mcspi_set_fifo(spi, t, 0);
out:
/* Restore defaults if they were overriden */
@@ -1375,7 +1254,7 @@ out:
omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
if (mcspi->fifo_depth > 0 && t)
- omap2_mcspi_set_fifo(spi, t, 0, 0);
+ omap2_mcspi_set_fifo(spi, t, 0);
return status;
}
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 03aab661be9d..82d6264841fc 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -25,11 +25,12 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/wait.h>
+#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -166,10 +168,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
int scr;
u8 cdm = 0;
u32 speed;
- u8 bits_per_word;
/* Start with the generic configuration for this device. */
- bits_per_word = spi->bits_per_word;
speed = spi->max_speed_hz;
/*
@@ -177,9 +177,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
* the transfer to overwrite the generic configuration with zeros.
*/
if (t) {
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
if (t->speed_hz)
speed = min(t->speed_hz, spi->max_speed_hz);
}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index cfc3b1ddbd22..6f12e4fb2e2e 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -136,14 +136,14 @@ struct sh_msiof_spi_priv {
/* SIFCTR */
#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */
-#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */
-#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */
-#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */
-#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */
-#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */
-#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */
-#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */
-#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */
#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */
#define SIFCTR_TFUA_SHIFT 20
#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT)
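
Without the UL suffix, 7 << 29 is computed in signed int and shifts into the sign bit - undefined behavior, and a negative value that sign-extends badly if the constant ever meets a 64-bit expression. A sketch:

/* Sketch: force an unsigned type before shifting near bit 31. */
#define FIELD_BAD       (7 << 29)       /* signed overflow: UB */
#define FIELD_OK        (7UL << 29)     /* well-defined 0xE0000000 */
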
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 7477a11e12be..f2170f4b5077 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1717,6 +1717,10 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
pm_runtime_put_noidle(ctlr->dev.parent);
dev_err(&ctlr->dev, "Failed to power device: %d\n",
ret);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+
return ret;
}
}
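
If runtime-resume of the controller's parent fails, the message was previously abandoned without being finalized, leaving spi_sync() callers waiting forever; setting msg->status and finalizing, as above, propagates the error to them. A sketch of the rule (using pm_runtime_resume_and_get() as a stand-in for the driver's get/put pair):

/* Sketch: every dequeued message must be finalized, even on error. */
ret = pm_runtime_resume_and_get(ctlr->dev.parent);
if (ret < 0) {
        msg->status = ret;
        spi_finalize_current_message(ctlr);     /* wakes the submitter */
        return ret;
}
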
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index e748a5d04e97..9149d41fe65b 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work)
struct ad5933_state, work.work);
struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
__be16 buf[2];
- int val[2];
+ u16 val[2];
unsigned char status;
int ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index f44e6412f4e3..d0db2efe0045 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -3723,12 +3723,10 @@ apply_min_padding:
static int atomisp_set_crop(struct atomisp_device *isp,
const struct v4l2_mbus_framefmt *format,
+ struct v4l2_subdev_state *sd_state,
int which)
{
struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = {
.which = which,
.target = V4L2_SEL_TGT_CROP,
@@ -3754,7 +3752,7 @@ static int atomisp_set_crop(struct atomisp_device *isp,
sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;
- ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel);
+ ret = v4l2_subdev_call(input->camera, pad, set_selection, sd_state, &sel);
if (ret)
dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);
@@ -3770,9 +3768,6 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
const struct atomisp_format_bridge *fmt, *snr_fmt;
struct atomisp_sub_device *asd = &isp->asd;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -3809,11 +3804,16 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
format.format.width, format.format.height);
- ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &format.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0)
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &format);
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format);
if (ret)
return ret;
@@ -4238,9 +4238,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
struct atomisp_device *isp = asd->isp;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_format_bridge *format;
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
+ struct v4l2_subdev_state *act_sd_state;
struct v4l2_subdev_format vformat = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -4268,12 +4266,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
/* Disable dvs if resolution can't be supported by sensor */
if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &vformat.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_TRY;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &vformat);
+ }
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_TRY;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
@@ -4291,12 +4295,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
}
}
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE);
- if (ret)
- return ret;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
+ ret = atomisp_set_crop(isp, &vformat.format, act_sd_state,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, act_sd_state, &vformat);
+ }
+
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat);
if (ret)
return ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index f7b4bee9574b..d5b077e602ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -132,8 +132,8 @@ struct atomisp_input_subdev {
/* Sensor rects for sensors which support crop */
struct v4l2_rect native_rect;
struct v4l2_rect active_rect;
- /* Sensor pad_cfg for which == V4L2_SUBDEV_FORMAT_TRY calls */
- struct v4l2_subdev_pad_config pad_cfg;
+ /* Sensor state for which == V4L2_SUBDEV_FORMAT_TRY calls */
+ struct v4l2_subdev_state *try_sd_state;
struct v4l2_subdev *motor;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 01b7fa9b56a2..5b2d88c02d36 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -781,12 +781,20 @@ static int atomisp_enum_framesizes(struct file *file, void *priv,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = input->code,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
+ if (!input->camera)
+ return -EINVAL;
+
if (input->crop_support)
return atomisp_enum_framesizes_crop(isp, fsize);
- ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
@@ -803,18 +811,25 @@ static int atomisp_enum_frameintervals(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_frame_interval_enum fie = {
- .code = atomisp_in_fmt_conv[0].code,
+ .code = atomisp_in_fmt_conv[0].code,
.index = fival->index,
.width = fival->width,
.height = fival->height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, enum_frame_interval, NULL,
- &fie);
+ if (!input->camera)
+ return -EINVAL;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_interval,
+ act_sd_state, &fie);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
@@ -830,30 +845,25 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
const struct atomisp_format_bridge *format;
- struct v4l2_subdev *camera;
+ struct v4l2_subdev_state *act_sd_state;
unsigned int i, fi = 0;
- int rval;
+ int ret;
- camera = isp->inputs[asd->input_curr].camera;
- if(!camera) {
- dev_err(isp->dev, "%s(): camera is NULL, device is %s\n",
- __func__, vdev->name);
+ if (!input->camera)
return -EINVAL;
- }
- rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code);
- if (rval == -ENOIOCTLCMD) {
- dev_warn(isp->dev,
- "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n",
- camera->name);
- }
-
- if (rval)
- return rval;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &code);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
format = &atomisp_output_fmts[i];
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index c1c8501ec61f..547e1444ad97 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -862,6 +862,9 @@ static void atomisp_unregister_entities(struct atomisp_device *isp)
v4l2_device_unregister(&isp->v4l2_dev);
media_device_unregister(&isp->media_dev);
media_device_cleanup(&isp->media_dev);
+
+ for (i = 0; i < isp->input_cnt; i++)
+ __v4l2_subdev_state_free(isp->inputs[i].try_sd_state);
}
static int atomisp_register_entities(struct atomisp_device *isp)
@@ -933,32 +936,49 @@ v4l2_device_failed:
static void atomisp_init_sensor(struct atomisp_input_subdev *input)
{
+ static struct lock_class_key try_sd_state_key;
struct v4l2_subdev_mbus_code_enum mbus_code_enum = { };
struct v4l2_subdev_frame_size_enum fse = { };
- struct v4l2_subdev_state sd_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = { };
+ struct v4l2_subdev_state *try_sd_state, *act_sd_state;
int i, err;
+ /*
+ * FIXME: Drivers are not supposed to use __v4l2_subdev_state_alloc()
+ * but atomisp needs this for try_fmt on its /dev/video# node since
+ * it emulates a normal v4l2 device there, passing through try_fmt /
+ * set_fmt to the sensor.
+ */
+ try_sd_state = __v4l2_subdev_state_alloc(input->camera,
+ "atomisp:try_sd_state->lock", &try_sd_state_key);
+ if (IS_ERR(try_sd_state))
+ return;
+
+ input->try_sd_state = try_sd_state;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum);
+ err = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &mbus_code_enum);
if (!err)
input->code = mbus_code_enum.code;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->native_rect = sel.r;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->active_rect = sel.r;
@@ -973,7 +993,8 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
fse.code = input->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ err = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
if (err)
break;
@@ -989,22 +1010,26 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
* for padding, set the crop rect to cover the entire sensor instead
* of only the default active area.
*
- * Do this for both try and active formats since the try_crop rect in
- * pad_cfg may influence (clamp) future try_fmt calls with which == try.
+ * Do this for both try and active formats since the crop rect in
+ * try_sd_state may influence (clamp) the size in calls with which == try.
*/
sel.which = V4L2_SUBDEV_FORMAT_TRY;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel);
+ v4l2_subdev_lock_state(input->try_sd_state);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ input->try_sd_state, &sel);
+ v4l2_subdev_unlock_state(input->try_sd_state);
if (err)
- return;
+ goto unlock_act_sd_state;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
input->native_rect.width, input->native_rect.height,
@@ -1012,6 +1037,10 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
input->binning_support);
input->crop_support = true;
+
+unlock_act_sd_state:
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
}
int atomisp_register_device_nodes(struct atomisp_device *isp)
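All of the atomisp hunks above follow one pattern: every sensor pad op now receives an explicitly locked v4l2_subdev_state instead of NULL or a stack-allocated one. A minimal sketch of that pattern, using the same helpers the diff introduces (function name hypothetical, error handling trimmed):

    /* Lock the state matching fmt->which, make the pad call, unlock. */
    static int sensor_set_fmt_locked(struct atomisp_input_subdev *input,
                                     struct v4l2_subdev_format *fmt)
    {
            struct v4l2_subdev_state *state;
            int ret;

            if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
                    state = input->try_sd_state;    /* allocated in atomisp_init_sensor() */
                    v4l2_subdev_lock_state(state);
            } else {
                    /* May return NULL for sensors without an active state. */
                    state = v4l2_subdev_lock_and_get_active_state(input->camera);
            }

            ret = v4l2_subdev_call(input->camera, pad, set_fmt, state, fmt);

            if (state)
                    v4l2_subdev_unlock_state(state);
            return ret;
    }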
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f58988130a..c1fbcdd16182 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item,
return count;
}
+static int target_try_configure_unmap(struct se_device *dev,
+ const char *config_opt)
+{
+ if (!dev->transport->configure_unmap) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ if (!target_dev_configured(dev)) {
+ pr_err("Generic Block Discard setup for %s requires device to be configured\n",
+ config_opt);
+ return -ENODEV;
+ }
+
+ if (!dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard setup for %s failed\n",
+ config_opt);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
@@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpu");
+ if (ret)
+ return ret;
}
da->emulate_tpu = flag;
@@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpws");
+ if (ret)
+ return ret;
}
da->emulate_tpws = flag;
@@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
* Discard supported is detected iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
- da->da_dev);
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
+ if (ret)
+ return ret;
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 41b7489d37ce..ed4fd22eac6e 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -907,12 +907,15 @@ new_bio:
return 0;
fail:
- if (bio)
- bio_put(bio);
+ if (bio) {
+ bio_uninit(bio);
+ kfree(bio);
+ }
while (req->bio) {
bio = req->bio;
req->bio = bio->bi_next;
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
}
req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 5ac5cb60bae6..bc6eb0dd66a4 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -49,7 +49,6 @@
*/
#define DEFAULT_DURATION_JIFFIES (6)
-static unsigned int target_mwait;
static struct dentry *debug_dir;
static bool poll_pkg_cstate_enable;
@@ -312,34 +311,6 @@ MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
"\twindow size results in slower response time but more smooth\n"
"\tclamping results. default to 2.");
-static void find_target_mwait(void)
-{
- unsigned int eax, ebx, ecx, edx;
- unsigned int highest_cstate = 0;
- unsigned int highest_subcstate = 0;
- int i;
-
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return;
-
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
-
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
- return;
-
- edx >>= MWAIT_SUBSTATE_SIZE;
- for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
- if (edx & MWAIT_SUBSTATE_MASK) {
- highest_cstate = i;
- highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
- }
- }
- target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
- (highest_subcstate - 1);
-
-}
-
struct pkg_cstate_info {
bool skip;
int msr_index;
@@ -759,9 +730,6 @@ static int __init powerclamp_probe(void)
return -ENODEV;
}
- /* find the deepest mwait value */
- find_target_mwait();
-
return 0;
}
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 87e4795275fe..6f798f6a2b84 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -203,7 +203,7 @@ struct tb_regs_switch_header {
#define ROUTER_CS_5_WOP BIT(1)
#define ROUTER_CS_5_WOU BIT(2)
#define ROUTER_CS_5_WOD BIT(3)
-#define ROUTER_CS_5_C3S BIT(23)
+#define ROUTER_CS_5_CNS BIT(23)
#define ROUTER_CS_5_PTO BIT(24)
#define ROUTER_CS_5_UTO BIT(25)
#define ROUTER_CS_5_HCO BIT(26)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index f8f0d24ff6e4..1515eff8cc3e 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -290,7 +290,7 @@ int usb4_switch_setup(struct tb_switch *sw)
}
/* TBT3 supported by the CM */
- val |= ROUTER_CS_5_C3S;
+ val &= ~ROUTER_CS_5_CNS;
return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 6e05c5c7bca1..c2a4e88b328f 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -108,13 +108,15 @@ config HVC_DCC_SERIALIZE_SMP
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV_SBI
+ depends on RISCV_SBI && NONPORTABLE
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
- is normally used only during boot to output printk.
+ is normally used only during boot to output printk. This driver
+ conflicts with real console drivers and should not be enabled on
+ systems that directly access the console.
- If you don't know what do to here, say Y.
+	  If you don't know what to do here, say N.
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index 558c4c7f3104..2dda737b1660 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -311,7 +311,7 @@ static void pci1xxxx_process_read_data(struct uart_port *port,
}
while (*valid_byte_count) {
- if (*buff_index > RX_BUF_SIZE)
+ if (*buff_index >= RX_BUF_SIZE)
break;
rx_buff[*buff_index] = readb(port->membase +
UART_RX_BYTE_FIFO);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index fccec1698a54..cf2c890a560f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1339,11 +1339,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
}
}
+static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+{
+ struct uart_port *port = &uap->port;
+ u32 cr;
+
+ /* Enable transmitter */
+ cr = pl011_read(uap, REG_CR);
+ cr |= UART011_CR_TXE;
+
+ /* Disable receiver if half-duplex */
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ cr &= ~UART011_CR_RXE;
+
+ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+ cr &= ~UART011_CR_RTS;
+ else
+ cr |= UART011_CR_RTS;
+
+ pl011_write(cr, uap, REG_CR);
+
+ if (port->rs485.delay_rts_before_send)
+ mdelay(port->rs485.delay_rts_before_send);
+
+ uap->rs485_tx_started = true;
+}
+
static void pl011_start_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+ !uap->rs485_tx_started)
+ pl011_rs485_tx_start(uap);
+
if (!pl011_dma_tx_start(uap))
pl011_start_tx_pio(uap);
}
@@ -1424,42 +1454,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
return true;
}
-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
-{
- struct uart_port *port = &uap->port;
- u32 cr;
-
- /* Enable transmitter */
- cr = pl011_read(uap, REG_CR);
- cr |= UART011_CR_TXE;
-
- /* Disable receiver if half-duplex */
- if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
- cr &= ~UART011_CR_RXE;
-
- if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
-
- pl011_write(cr, uap, REG_CR);
-
- if (port->rs485.delay_rts_before_send)
- mdelay(port->rs485.delay_rts_before_send);
-
- uap->rs485_tx_started = true;
-}
-
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
- if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
- !uap->rs485_tx_started)
- pl011_rs485_tx_start(uap);
-
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
return true;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f3a99daebdaa..10bf6d75bf9e 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -237,6 +237,14 @@
#define MAX310x_REV_MASK (0xf8)
#define MAX310X_WRITE_BIT 0x80
+/* Port startup definitions */
+#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */
+#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */
+
+/* Crystal-related definitions */
+#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */
+#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */
+
/* MAX3107 specific */
#define MAX3107_REV_ID (0xa0)
@@ -583,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr)
return 1;
}
-static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
+static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
unsigned long freq, bool xtal)
{
unsigned int div, clksrc, pllcfg = 0;
@@ -641,12 +649,20 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s,
/* Wait for crystal */
if (xtal) {
- unsigned int val;
- msleep(10);
- regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
- if (!(val & MAX310X_STS_CLKREADY_BIT)) {
- dev_warn(dev, "clock is not stable yet\n");
- }
+ bool stable = false;
+ unsigned int try = 0, val = 0;
+
+ do {
+ msleep(MAX310X_XTAL_WAIT_DELAY_MS);
+ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val);
+
+ if (val & MAX310X_STS_CLKREADY_BIT)
+ stable = true;
+ } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES));
+
+ if (!stable)
+ return dev_err_probe(dev, -EAGAIN,
+ "clock is not stable\n");
}
return bestfreq;
@@ -1271,7 +1287,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
{
int i, ret, fmin, fmax, freq;
struct max310x_port *s;
- u32 uartclk = 0;
+ s32 uartclk = 0;
bool xtal;
for (i = 0; i < devtype->nr; i++)
@@ -1334,6 +1350,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
goto out_clk;
for (i = 0; i < devtype->nr; i++) {
+ bool started = false;
+ unsigned int try = 0, val = 0;
+
/* Reset port */
regmap_write(regmaps[i], MAX310X_MODE2_REG,
MAX310X_MODE2_RST_BIT);
@@ -1342,13 +1361,27 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
/* Wait for port startup */
do {
- regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret);
- } while (ret != 0x01);
+ msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS);
+ regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val);
+
+ if (val == 0x01)
+ started = true;
+ } while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
+
+ if (!started) {
+ ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
+ goto out_uart;
+ }
regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
}
uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
+ if (uartclk < 0) {
+ ret = uartclk;
+ goto out_uart;
+ }
+
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
for (i = 0; i < devtype->nr; i++) {
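Both max310x waits above replace open-ended polling with the same bounded-retry shape; reduced to a sketch (the helper name and its register/bit parameters are hypothetical, the constants are from the diff):

    /* Poll a ready bit with a fixed retry budget instead of spinning forever. */
    static int max310x_poll_ready(struct max310x_port *s, unsigned int reg,
                                  unsigned int ready_bit)
    {
            unsigned int try = 0, val = 0;

            do {
                    msleep(MAX310X_XTAL_WAIT_DELAY_MS);
                    regmap_read(s->regmap, reg, &val);
                    if (val & ready_bit)
                            return 0;
            } while (++try < MAX310X_XTAL_WAIT_RETRIES);

            return -EAGAIN;         /* callers report via dev_err_probe() */
    }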
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 3ec725555bcc..4749331fe618 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -605,13 +605,16 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
return;
}
- pending = uart_port_tx(&s->port, ch,
+ pending = uart_port_tx_flags(&s->port, ch, UART_TX_NOSTOP,
!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF),
mxs_write(ch, s, REG_DATA));
if (pending)
mxs_set(AUART_INTR_TXIEN, s, REG_INTR);
else
mxs_clr(AUART_INTR_TXIEN, s, REG_INTR);
+
+ if (uart_tx_stopped(&s->port))
+ mxs_auart_stop_tx(&s->port);
}
static void mxs_auart_rx_char(struct mxs_auart_port *s)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index b56ed8c376b2..d6a58a9e072a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1084,8 +1084,8 @@ static int uart_tiocmget(struct tty_struct *tty)
goto out;
if (!tty_io_error(tty)) {
- result = uport->mctrl;
uart_port_lock_irq(uport);
+ result = uport->mctrl;
result |= uport->ops->get_mctrl(uport);
uart_port_unlock_irq(uport);
}
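The uart_tiocmget change is a lock-ordering fix: uport->mctrl is soft state written under the port lock elsewhere in serial_core, so sampling it outside the lock could race with a concurrent modem-control update. The corrected sequence, annotated:

    uart_port_lock_irq(uport);
    result = uport->mctrl;                  /* soft state, guarded by the port lock */
    result |= uport->ops->get_mctrl(uport); /* live hardware line state */
    uart_port_unlock_irq(uport);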
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 794b77512740..693e932d6feb 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -251,7 +251,9 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
+ if (!port->rs485_rx_during_tx_gpio)
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
} else {
stm32_usart_clr_bits(port, ofs->cr3,
USART_CR3_DEM | USART_CR3_DEP);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 029d017fc1b6..3b89c9d4aa40 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1469,7 +1469,7 @@ static int ufshcd_devfreq_target(struct device *dev,
int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
- bool scale_up, sched_clk_scaling_suspend_work = false;
+ bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
unsigned long irq_flags;
@@ -3057,7 +3057,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
*/
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
- u32 mask = 1U << task_tag;
+ u32 mask;
unsigned long flags;
int err;
@@ -3075,6 +3075,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
return 0;
}
+ mask = 1U << task_tag;
+
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, mask);
@@ -6352,7 +6354,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_hold(hba);
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
- ufshcd_release(hba);
pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
ufshcd_vops_resume(hba, pm_op);
} else {
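In ufshcd_clear_cmd the mask computation moves below the early-return branch, presumably because MCQ task tags can exceed 31, so computing 1U << task_tag up front would be an undefined shift on a path that never uses the mask. Reduced sketch with hypothetical helper names:

    static int clear_cmd(struct ufs_hba *hba, u32 task_tag)
    {
            u32 mask;

            if (hba_mcq_enabled(hba))                    /* hypothetical predicate */
                    return clear_cmd_mcq(hba, task_tag); /* hypothetical; tag may be >= 32 */

            mask = 1U << task_tag;                       /* safe: legacy tags are 0..31 */
            return clear_doorbell(hba, mask);            /* hypothetical */
    }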
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index aeca902ab6cc..fd1beb10bba7 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
return;
}
- if (request->complete) {
+ /*
+	 * The zlp request is appended by the driver; there is no need to call
+	 * usb_gadget_giveback_request() to notify the gadget composite driver.
+ */
+ if (request->complete && request->buf != priv_dev->zlp_buf) {
spin_unlock(&priv_dev->lock);
usb_gadget_giveback_request(&priv_ep->endpoint,
request);
@@ -2540,11 +2544,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+ list_del_init(&priv_req->list);
kfree(priv_req->request.buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
}
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 33548771a0d3..465e9267b49c 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -395,7 +395,6 @@ pm_put:
return ret;
}
-
/**
* cdns_wakeup_irq - interrupt handler for wakeup events
* @irq: irq number for cdns3/cdnsp core device
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 04b6d12f2b9a..ee917f1b091c 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
*/
static void cdns_otg_disable_irq(struct cdns *cdns)
{
- writel(0, &cdns->otg_irq_regs->ien);
+ if (cdns->version)
+ writel(0, &cdns->otg_irq_regs->ien);
}
/**
@@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns)
cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
- if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+ state = readl(&cdns->otg_cdnsp_regs->did);
+
+ if (OTG_CDNSP_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_cdnsp_regs->ien;
cdns->version = CDNSP_CONTROLLER_V2;
- } else {
+ } else if (OTG_CDNS3_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_v1_regs->ien;
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
+ } else {
+		dev_err(cdns->dev, "unsupported DID=0x%08x\n", state);
+ return -EINVAL;
}
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
@@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns)
return 0;
}
-
/* Indicate the cdns3 core was power lost before */
bool cdns_power_is_lost(struct cdns *cdns)
{
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index cbdf94f73ed9..d72370c321d3 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
__le32 susp_timing_ctrl;
};
-#define OTG_CDNSP_DID 0x0004034E
+/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
+#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
+
+/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
+#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
/*
* Common registers interface for both CDNS3 and CDNSP version of DRD.
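The DID check above widens from one exact ID to a whole controller family by masking off the low revision byte; the idiom in isolation (macro names here are illustrative restatements of the ones in the diff):

    #include <linux/bits.h>

    /* A device ID belongs to a family if its top 24 bits match. */
    #define DID_FAMILY(did)      ((did) & GENMASK(31, 8))
    #define IS_CDNSP_DID(did)    (DID_FAMILY(did) == 0x00040300)  /* 0x000403xx */
    #define IS_CDNS3_DID(did)    (DID_FAMILY(did) == 0x00040200)  /* 0x000402xx */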
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 6164fc4c96a4..ceca4d839dfd 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -18,6 +18,11 @@
#include "../host/xhci.h"
#include "../host/xhci-plat.h"
+/*
+ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 registers exist only
+ * in the Cadence USB3 dual-role controller, so they can't be used
+ * with the Cadence CDNSP dual-role controller.
+ */
#define XECP_PORT_CAP_REG 0x8000
#define XECP_AUX_CTRL_REG1 0x8120
@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.resume_quirk = xhci_cdns3_resume_quirk,
};
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+
static int __cdns_host_init(struct cdns *cdns)
{
struct platform_device *xhci;
@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
goto err1;
}
- cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
- sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ if (cdns->version < CDNSP_CONTROLLER_V2)
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ else
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+
if (!cdns->xhci_plat_data) {
ret = -ENOMEM;
goto err1;
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index d9bb3d3f026e..2a38e1eb6546 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -176,6 +176,7 @@ struct hw_bank {
* @enabled_otg_timer_bits: bits of enabled otg timers
* @next_otg_timer: next nearest enabled timer to be expired
* @work: work for role changing
+ * @power_lost_work: work for power lost handling
* @wq: workqueue thread
* @qh_pool: allocation pool for queue heads
* @td_pool: allocation pool for transfer descriptors
@@ -226,6 +227,7 @@ struct ci_hdrc {
enum otg_fsm_timer next_otg_timer;
struct usb_role_switch *role_switch;
struct work_struct work;
+ struct work_struct power_lost_work;
struct workqueue_struct *wq;
struct dma_pool *qh_pool;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 41014f93cfdf..835bf2428dc6 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -856,6 +856,27 @@ static int ci_extcon_register(struct ci_hdrc *ci)
return 0;
}
+static void ci_power_lost_work(struct work_struct *work)
+{
+ struct ci_hdrc *ci = container_of(work, struct ci_hdrc, power_lost_work);
+ enum ci_role role;
+
+ disable_irq_nosync(ci->irq);
+ pm_runtime_get_sync(ci->dev);
+ if (!ci_otg_is_fsm_mode(ci)) {
+ role = ci_get_role(ci);
+
+ if (ci->role != role) {
+ ci_handle_id_switch(ci);
+ } else if (role == CI_ROLE_GADGET) {
+ if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
+ usb_gadget_vbus_connect(&ci->gadget);
+ }
+ }
+ pm_runtime_put_sync(ci->dev);
+ enable_irq(ci->irq);
+}
+
static DEFINE_IDA(ci_ida);
struct platform_device *ci_hdrc_add_device(struct device *dev,
@@ -1045,6 +1066,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
spin_lock_init(&ci->lock);
mutex_init(&ci->mutex);
+ INIT_WORK(&ci->power_lost_work, ci_power_lost_work);
+
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -1396,25 +1419,6 @@ static int ci_suspend(struct device *dev)
return 0;
}
-static void ci_handle_power_lost(struct ci_hdrc *ci)
-{
- enum ci_role role;
-
- disable_irq_nosync(ci->irq);
- if (!ci_otg_is_fsm_mode(ci)) {
- role = ci_get_role(ci);
-
- if (ci->role != role) {
- ci_handle_id_switch(ci);
- } else if (role == CI_ROLE_GADGET) {
- if (ci->is_otg && hw_read_otgsc(ci, OTGSC_BSV))
- usb_gadget_vbus_connect(&ci->gadget);
- }
- }
-
- enable_irq(ci->irq);
-}
-
static int ci_resume(struct device *dev)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -1446,7 +1450,7 @@ static int ci_resume(struct device *dev)
ci_role(ci)->resume(ci, power_lost);
if (power_lost)
- ci_handle_power_lost(ci);
+ queue_work(system_freezable_wq, &ci->power_lost_work);
if (ci->supports_runtime_pm) {
pm_runtime_disable(dev);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 84d91b1c1eed..0886b19d2e1c 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
return ret;
}
- root = debugfs_create_dir(dev_name(dev), ulpi_root);
+ root = debugfs_create_dir(dev_name(&ulpi->dev), ulpi_root);
debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index ffd7c99e24a3..e38a4124f610 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2053,9 +2053,19 @@ static void update_port_device_state(struct usb_device *udev)
if (udev->parent) {
hub = usb_hub_to_struct_hub(udev->parent);
- port_dev = hub->ports[udev->portnum - 1];
- WRITE_ONCE(port_dev->state, udev->state);
- sysfs_notify_dirent(port_dev->state_kn);
+
+ /*
+ * The Link Layer Validation System Driver (lvstest)
+ * has a test step to unbind the hub before running the
+ * rest of the procedure. This triggers hub_disconnect
+ * which will set the hub's maxchild to 0, further
+ * resulting in usb_hub_to_struct_hub returning NULL.
+ */
+ if (hub) {
+ port_dev = hub->ports[udev->portnum - 1];
+ WRITE_ONCE(port_dev->state, udev->state);
+ sysfs_notify_dirent(port_dev->state_kn);
+ }
}
}
@@ -2388,17 +2398,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
}
} else if (desc->bLength == sizeof
(struct usb_otg_descriptor)) {
- /* Set a_alt_hnp_support for legacy otg device */
- err = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_SET_FEATURE, 0,
- USB_DEVICE_A_ALT_HNP_SUPPORT,
- 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- if (err < 0)
- dev_err(&udev->dev,
- "set a_alt_hnp_support failed: %d\n",
- err);
+ /*
+				 * We are operating on a legacy OTG device.
+				 * These should be told that they are operating
+				 * on the wrong port if we have another port that
+				 * does support HNP.
+ */
+ if (bus->otg_port != 0) {
+ /* Set a_alt_hnp_support for legacy otg device */
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_A_ALT_HNP_SUPPORT,
+ 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err < 0)
+ dev_err(&udev->dev,
+ "set a_alt_hnp_support failed: %d\n",
+ err);
+ }
}
}
#endif
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e3eea965e57b..e120611a5174 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -376,7 +376,6 @@
/* Global HWPARAMS4 Register */
#define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13)
#define DWC3_MAX_HIBER_SCRATCHBUFS 15
-#define DWC3_EXT_BUFF_CONTROL BIT(21)
/* Global HWPARAMS6 Register */
#define DWC3_GHWPARAMS6_BCSUPPORT BIT(14)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6604845c397c..39564e17f3b0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -51,6 +51,8 @@
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
+#define PCI_DEVICE_ID_INTEL_ARLH 0x7ec1
+#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -421,6 +423,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, ARLH, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 019368f8e9c4..28f49400f3e8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -673,12 +673,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
}
- if (dep->endpoint.fifo_mode) {
- if (!(dwc->hwparams.hwparams4 & DWC3_EXT_BUFF_CONTROL))
- return -EINVAL;
- params.param1 |= DWC3_DEPCFG_EBC_HWO_NOWB | DWC3_DEPCFG_USE_EBC;
- }
-
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
@@ -2656,6 +2650,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
int ret;
spin_lock_irqsave(&dwc->lock, flags);
+ if (!dwc->pullups_connected) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+ }
+
dwc->connected = false;
/*
@@ -4709,15 +4708,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
unsigned long flags;
int ret;
- if (!dwc->gadget_driver)
- return 0;
-
ret = dwc3_gadget_soft_disconnect(dwc);
if (ret)
goto err;
spin_lock_irqsave(&dwc->lock, flags);
- dwc3_disconnect_gadget(dwc);
+ if (dwc->gadget_driver)
+ dwc3_disconnect_gadget(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index fd7a4e94397e..55a56cf67d73 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -26,8 +26,6 @@ struct dwc3;
#define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10)
#define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11)
#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13)
-#define DWC3_DEPCFG_EBC_HWO_NOWB BIT(14)
-#define DWC3_DEPCFG_USE_EBC BIT(15)
#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE BIT(24)
#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 61f57fe5bb78..43230915323c 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -61,7 +61,7 @@ out:
int dwc3_host_init(struct dwc3 *dwc)
{
- struct property_entry props[4];
+ struct property_entry props[5];
struct platform_device *xhci;
int ret, irq;
int prop_idx = 0;
@@ -89,6 +89,8 @@ int dwc3_host_init(struct dwc3 *dwc)
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
+ props[prop_idx++] = PROPERTY_ENTRY_BOOL("xhci-sg-trb-cache-size-quirk");
+
if (dwc->usb3_lpm_capable)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("usb3-lpm-capable");
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 722a3ab2b337..c265a1f62fc1 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -545,21 +545,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
+ int rc;
+
if (!fsg_is_set(common))
return false;
bh->state = BUF_STATE_SENDING;
- if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
+ rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
+ if (rc) {
bh->state = BUF_STATE_EMPTY;
+ if (rc == -ESHUTDOWN) {
+ common->running = 0;
+ return false;
+ }
+ }
return true;
}
static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
+ int rc;
+
if (!fsg_is_set(common))
return false;
bh->state = BUF_STATE_RECEIVING;
- if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
+ rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq);
+ if (rc) {
bh->state = BUF_STATE_FULL;
+ if (rc == -ESHUTDOWN) {
+ common->running = 0;
+ return false;
+ }
+ }
return true;
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index a1575a0ca568..e2a059cfda2c 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -105,8 +105,8 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/*
* Although max mtu as dictated by u_ether is 15412 bytes, setting
- * max_segment_sizeto 15426 would not be efficient. If user chooses segment
- * size to be (>= 8192), then we can't aggregate more than one buffer in each
+ * max_segment_size to 15426 would not be efficient. If user chooses segment
+ * size to be (>= 8192), then we can't aggregate more than one buffer in each
* NTB (assuming each packet coming from network layer is >= 8192 bytes) as ep
* maxpacket limit is 16384. So let max_segment_size be limited to 8000 to allow
* at least 2 packets to be aggregated reducing wastage of NTB buffer space
@@ -1338,7 +1338,15 @@ parse_ntb:
"Parsed NTB with %d frames\n", dgram_counter);
to_process -= block_len;
- if (to_process != 0) {
+
+ /*
+ * Windows NCM driver avoids USB ZLPs by adding a 1-byte
+ * zero pad as needed.
+ */
+ if (to_process == 1 &&
+ (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+ to_process--;
+ } else if (to_process > 0) {
ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
goto parse_ntb;
}
@@ -1489,7 +1497,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm_data_intf.bInterfaceNumber = status;
ncm_union_desc.bSlaveInterface0 = status;
- ecm_desc.wMaxSegmentSize = ncm_opts->max_segment_size;
+ ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_opts->max_segment_size);
status = -ENODEV;
@@ -1685,7 +1693,7 @@ static struct usb_function_instance *ncm_alloc_inst(void)
kfree(opts);
return ERR_CAST(net);
}
- opts->max_segment_size = cpu_to_le16(ETH_FRAME_LEN);
+ opts->max_segment_size = ETH_FRAME_LEN;
INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
descs[0] = &opts->ncm_os_desc;
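The parse_ntb change above tolerates the Windows ZLP-avoidance byte: after a block is consumed, a single leftover 0x00 byte is treated as padding rather than as the start of another NTB. As a standalone loop sketch (parse_one_ntb is a hypothetical stand-in for the real block parser):

    while (to_process > 0) {
            if (to_process == 1 && ntb_ptr[0] == 0x00)
                    break;                  /* Windows-style ZLP-avoidance pad */

            block_len = parse_one_ntb(ntb_ptr, to_process); /* hypothetical */
            if (block_len <= 0)
                    break;                  /* malformed block */

            ntb_ptr += block_len;
            to_process -= block_len;
    }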
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 10c5d7f726a1..f90eeecf27de 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
static inline int machine_without_vbus_sense(void)
{
- return machine_is_omap_osk() || machine_is_sx1();
+ return machine_is_omap_osk() || machine_is_omap_palmte() ||
+ machine_is_sx1();
}
static int omap_udc_start(struct usb_gadget *g,
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 4f8617210d85..169f72665739 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -274,7 +274,6 @@ struct pch_udc_cfg_data {
* @td_data: for data request
* @dev: reference to device struct
* @offset_addr: offset address of ep register
- * @desc: for this ep
* @queue: queue for requests
* @num: endpoint number
* @in: endpoint is IN
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index ac3fc5970315..cfebb833668e 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
static int uhci_grlib_init(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 4460fa7e9fab..a7716202a8dd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1861,14 +1861,14 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned int intr_num;
+ spin_lock_irq(&xhci->lock);
+
/* interrupter 0 is primary interrupter, don't touch it */
- if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters)
+ if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
-
- /* fixme, should we check xhci->interrupter[intr_num] == ir */
- /* fixme locking */
-
- spin_lock_irq(&xhci->lock);
+ spin_unlock_irq(&xhci->lock);
+ return;
+ }
intr_num = ir->intr_num;
@@ -2322,7 +2322,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
u64 erst_base;
u32 erst_size;
- if (intr_num > xhci->max_interrupters) {
+ if (intr_num >= xhci->max_interrupters) {
xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n",
intr_num, xhci->max_interrupters);
return -EINVAL;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index f04fde19f551..3d071b875308 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -253,6 +253,9 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
+ if (device_property_read_bool(tmpdev, "xhci-sg-trb-cache-size-quirk"))
+ xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
+
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 33806ae966f9..f0d8a607ff21 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2376,6 +2376,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
+ /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
+ if (td->error_mid_td)
+ break;
if (remaining) {
frame->status = short_framestatus;
if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
@@ -2391,9 +2394,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_BANDWIDTH_OVERRUN_ERROR:
frame->status = -ECOMM;
break;
- case COMP_ISOCH_BUFFER_OVERRUN:
case COMP_BABBLE_DETECTED_ERROR:
+ sum_trbs_for_length = true;
+ fallthrough;
+ case COMP_ISOCH_BUFFER_OVERRUN:
frame->status = -EOVERFLOW;
+ if (ep_trb != td->last_trb)
+ td->error_mid_td = true;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
case COMP_STALL_ERROR:
@@ -2401,8 +2408,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
case COMP_USB_TRANSACTION_ERROR:
frame->status = -EPROTO;
+ sum_trbs_for_length = true;
if (ep_trb != td->last_trb)
- return 0;
+ td->error_mid_td = true;
break;
case COMP_STOPPED:
sum_trbs_for_length = true;
@@ -2422,6 +2430,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
}
+ if (td->urb_length_set)
+ goto finish_td;
+
if (sum_trbs_for_length)
frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
ep_trb_len - remaining;
@@ -2430,6 +2441,14 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
td->urb->actual_length += frame->actual_length;
+finish_td:
+ /* Don't give back TD yet if we encountered an error mid TD */
+ if (td->error_mid_td && ep_trb != td->last_trb) {
+ xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
+ td->urb_length_set = true;
+ return 0;
+ }
+
return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
@@ -2808,17 +2827,51 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
if (!ep_seg) {
- if (!ep->skip ||
- !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
- /* Some host controllers give a spurious
- * successful event after a short transfer.
- * Ignore it.
- */
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
- ep_ring->last_td_was_short) {
- ep_ring->last_td_was_short = false;
- goto cleanup;
+
+ if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ skip_isoc_td(xhci, td, ep, status);
+ goto cleanup;
+ }
+
+ /*
+ * Some hosts give a spurious success event after a short
+ * transfer. Ignore it.
+ */
+ if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
+ ep_ring->last_td_was_short) {
+ ep_ring->last_td_was_short = false;
+ goto cleanup;
+ }
+
+ /*
+ * xhci 4.10.2 states isoc endpoints should continue
+ * processing the next TD if there was an error mid TD.
+		 * So hosts like NEC don't generate an event for the last
+		 * isoc TRB even if the IOC flag is set.
+		 * xhci 4.9.1 states that if there are errors in multi-TRB
+		 * TDs the xHC should generate an error for that TRB, and if
+		 * the xHC proceeds to the next TD it should generate an event
+		 * for any TRB with the IOC flag on the way. Other hosts follow
+		 * this. So this event might be for the next TD.
+ */
+ if (td->error_mid_td &&
+ !list_is_last(&td->td_list, &ep_ring->td_list)) {
+ struct xhci_td *td_next = list_next_entry(td, td_list);
+
+ ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
+ td_next->last_trb, ep_trb_dma, false);
+ if (ep_seg) {
+ /* give back previous TD, start handling new */
+ xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
+ ep_ring->dequeue = td->last_trb;
+ ep_ring->deq_seg = td->last_trb_seg;
+ inc_deq(xhci, ep_ring);
+ xhci_td_cleanup(xhci, td, ep_ring, td->status);
+ td = td_next;
}
+ }
+
+ if (!ep_seg) {
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
@@ -2830,9 +2883,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_trb_dma, true);
return -ESHUTDOWN;
}
-
- skip_isoc_td(xhci, td, ep, status);
- goto cleanup;
}
if (trb_comp_code == COMP_SHORT_PACKET)
ep_ring->last_td_was_short = true;
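The handle_tx_event rework amounts to a small retry step: if an event TRB doesn't fall inside the current TD, and that TD recorded a mid-TD error, the event likely belongs to the next TD, so the stale TD is retired and matching is attempted once more. Heavily reduced, with hypothetical helpers:

    if (!ep_seg && td->error_mid_td && !is_last_td(ep_ring, td)) {
            struct xhci_td *next = next_td_on_ring(td);     /* hypothetical */

            ep_seg = trb_in_td(xhci, next->start_seg, next->first_trb,
                               next->last_trb, ep_trb_dma, false);
            if (ep_seg) {
                    retire_td(xhci, ep_ring, td);   /* hypothetical: give back old TD */
                    td = next;                      /* process event against new TD */
            }
    }
    if (!ep_seg)
            return -ESHUTDOWN;  /* event matches nothing: host controller is busted */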
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a5c72a634e6a..6f82d404883f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1549,6 +1549,7 @@ struct xhci_td {
struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
+ bool error_mid_td;
unsigned int num_trbs;
};
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index ae41578bd014..70165dd86b5d 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -21,7 +21,9 @@ static const struct class role_class = {
struct usb_role_switch {
struct device dev;
struct mutex lock; /* device lock*/
+ struct module *module; /* the module this device depends on */
enum usb_role role;
+ bool registered;
/* From descriptor */
struct device *usb2_port;
@@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
if (IS_ERR_OR_NULL(sw))
return 0;
+ if (!sw->registered)
+ return -EOPNOTSUPP;
+
mutex_lock(&sw->lock);
ret = sw->set(sw, role);
@@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
{
enum usb_role role;
- if (IS_ERR_OR_NULL(sw))
+ if (IS_ERR_OR_NULL(sw) || !sw->registered)
return USB_ROLE_NONE;
mutex_lock(&sw->lock);
@@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
NULL, usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
void usb_role_switch_put(struct usb_role_switch *sw)
{
if (!IS_ERR_OR_NULL(sw)) {
- module_put(sw->dev.parent->driver->owner);
+ module_put(sw->module);
put_device(&sw->dev);
}
}
@@ -189,15 +194,18 @@ struct usb_role_switch *
usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
{
struct device *dev;
+ struct usb_role_switch *sw = NULL;
if (!fwnode)
return NULL;
dev = class_find_device_by_fwnode(&role_class, fwnode);
- if (dev)
- WARN_ON(!try_module_get(dev->parent->driver->owner));
+ if (dev) {
+ sw = to_role_switch(dev);
+ WARN_ON(!try_module_get(sw->module));
+ }
- return dev ? to_role_switch(dev) : NULL;
+ return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
@@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent,
sw->set = desc->set;
sw->get = desc->get;
+ sw->module = parent->driver->owner;
sw->dev.parent = parent;
sw->dev.fwnode = desc->fwnode;
sw->dev.class = &role_class;
@@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ sw->registered = true;
+
/* TODO: Symlinks for the host port and the device controller. */
return sw;
@@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ sw->registered = false;
device_unregister(&sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
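The class.c changes cache the provider's module pointer at registration time because sw->dev.parent->driver can become NULL once the parent driver unbinds, while the role switch object may outlive it. Sketched with hypothetical function names:

    /* Capture the owning module while its probe is still on the stack. */
    static void role_switch_init(struct usb_role_switch *sw, struct device *parent)
    {
            sw->module = parent->driver->owner;     /* valid here; maybe not later */
            sw->registered = true;
    }

    static void role_switch_pin(struct usb_role_switch *sw)
    {
            WARN_ON(!try_module_get(sw->module));   /* never deref parent->driver */
    }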
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1e61fe043171..923e0ed85444 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -146,6 +146,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
{ USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 72390dbf0769..2ae124c49d44 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2269,6 +2269,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index b1e844bf31f8..703a9c563557 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -184,6 +184,8 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
{DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x8217)}, /* Dell Wireless DW5826e */
+ {DEVICE_SWI(0x413c, 0x8218)}, /* Dell Wireless DW5826e QDL */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c54e9805da53..12cf9940e5b6 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -180,6 +180,13 @@ static int slave_configure(struct scsi_device *sdev)
sdev->use_192_bytes_for_3f = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9707f53cfda9..71ace274761f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -879,6 +879,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->guess_capacity = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 5945e3a2b0f7..66e532edcece 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3743,9 +3743,6 @@ static void tcpm_detach(struct tcpm_port *port)
if (tcpm_port_is_disconnected(port))
port->hard_reset_count = 0;
- port->try_src_count = 0;
- port->try_snk_count = 0;
-
if (!port->attached)
return;
@@ -6848,7 +6845,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
if (err)
goto out_role_sw_put;
- port->typec_caps.pd = port->pds[0];
+ if (port->pds)
+ port->typec_caps.pd = port->pds[0];
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
if (IS_ERR(port->typec_port)) {
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 5392ec698959..14f5a7bfae2e 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -938,7 +938,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
clear_bit(EVENT_PENDING, &con->ucsi->flags);
+ mutex_lock(&ucsi->ppm_lock);
ret = ucsi_acknowledge_connector_change(ucsi);
+ mutex_unlock(&ucsi->ppm_lock);
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 6bbf490ac401..928eacbeb21a 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -25,6 +25,8 @@ struct ucsi_acpi {
unsigned long flags;
guid_t guid;
u64 cmd;
+ bool dell_quirk_probed;
+ bool dell_quirk_active;
};
static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
@@ -73,9 +75,13 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI;
int ret;
- set_bit(COMMAND_PENDING, &ua->flags);
+ if (ack)
+ set_bit(ACK_PENDING, &ua->flags);
+ else
+ set_bit(COMMAND_PENDING, &ua->flags);
ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
if (ret)
@@ -85,7 +91,10 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
ret = -ETIMEDOUT;
out_clear_bit:
- clear_bit(COMMAND_PENDING, &ua->flags);
+ if (ack)
+ clear_bit(ACK_PENDING, &ua->flags);
+ else
+ clear_bit(COMMAND_PENDING, &ua->flags);
return ret;
}
@@ -119,12 +128,73 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
.async_write = ucsi_acpi_async_write
};
-static const struct dmi_system_id zenbook_dmi_id[] = {
+/*
+ * Some Dell laptops expect that an ACK command with the
+ * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
+ * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
+ * If this is not done, events are not delivered to OSPM and
+ * subsequent commands will time out.
+ */
+static int
+ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ u64 cmd = *(u64 *)val, ack = 0;
+ int ret;
+
+ if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
+ cmd & UCSI_ACK_CONNECTOR_CHANGE)
+ ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
+
+ ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
+ if (ret != 0)
+ return ret;
+ if (ack == 0)
+ return ret;
+
+ if (!ua->dell_quirk_probed) {
+ ua->dell_quirk_probed = true;
+
+ cmd = UCSI_GET_CAPABILITY;
+ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
+ sizeof(cmd));
+ if (ret == 0)
+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
+ &ack, sizeof(ack));
+ if (ret != -ETIMEDOUT)
+ return ret;
+
+ ua->dell_quirk_active = true;
+ dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
+ dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
+ }
+
+ if (!ua->dell_quirk_active)
+ return ret;
+
+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
+}
+
+static const struct ucsi_operations ucsi_dell_ops = {
+ .read = ucsi_acpi_read,
+ .sync_write = ucsi_dell_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
+static const struct dmi_system_id ucsi_acpi_quirks[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
},
+ .driver_data = (void *)&ucsi_zenbook_ops,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ .driver_data = (void *)&ucsi_dell_ops,
},
{ }
};
@@ -142,8 +212,10 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(COMMAND_PENDING, &ua->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
+ complete(&ua->complete);
+ if (cci & UCSI_CCI_COMMAND_COMPLETE &&
+ test_bit(COMMAND_PENDING, &ua->flags))
complete(&ua->complete);
}
@@ -151,6 +223,7 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
const struct ucsi_operations *ops = &ucsi_acpi_ops;
+ const struct dmi_system_id *id;
struct ucsi_acpi *ua;
struct resource *res;
acpi_status status;
@@ -180,8 +253,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
init_completion(&ua->complete);
ua->dev = &pdev->dev;
- if (dmi_check_system(zenbook_dmi_id))
- ops = &ucsi_zenbook_ops;
+ id = dmi_first_match(ucsi_acpi_quirks);
+ if (id)
+ ops = id->driver_data;
ua->ucsi = ucsi_create(&pdev->dev, ops);
if (IS_ERR(ua->ucsi))
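
The quirk above probes once with UCSI_GET_CAPABILITY after the first connector-change ACK; if that probe times out, it enables a permanent workaround that replays every such ACK with a command-complete-only ACK. A minimal standalone sketch of that decision path, with write_ctrl() as a hypothetical stand-in for ucsi_acpi_sync_write() and the UCSI_* constants redefined locally (bit positions illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UCSI_ACK_CC_CI            0x04ULL
#define UCSI_ACK_CONNECTOR_CHANGE (1ULL << 16)
#define UCSI_ACK_COMMAND_COMPLETE (1ULL << 17)
#define UCSI_COMMAND(cmd)         ((cmd) & 0xffULL)

/* Hypothetical stand-in for ucsi_acpi_sync_write() on UCSI_CONTROL. */
static int write_ctrl(uint64_t cmd)
{
	printf("write: 0x%llx\n", (unsigned long long)cmd);
	return 0;
}

/* Mirrors the quirk's decision logic once dell_quirk_active is set. */
static int dell_sync_write(uint64_t cmd, bool quirk_active)
{
	int ret = write_ctrl(cmd);

	if (ret)
		return ret;

	/* Only a connector-change ACK needs the follow-up ACK. */
	if (UCSI_COMMAND(cmd) != UCSI_ACK_CC_CI ||
	    !(cmd & UCSI_ACK_CONNECTOR_CHANGE) || !quirk_active)
		return 0;

	/* Send a second ACK carrying only the command-complete bit. */
	return write_ctrl(UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE);
}

int main(void)
{
	return dell_sync_write(UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE, true);
}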
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 63af6ab034b5..46823c2e2ba1 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -631,8 +631,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (logo_lines > vc->vc_bottom) {
logo_shown = FBCON_LOGO_CANSHOW;
- printk(KERN_INFO
- "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
+ pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n");
} else {
logo_shown = FBCON_LOGO_DRAW;
vc->vc_top = logo_lines;
@@ -2400,11 +2399,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
- char *old_data = NULL;
+ u8 *old_data = vc->vc_font.data;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
- if (p->userfont)
- old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
old_userfont = p->userfont;
if ((p->userfont = userfont))
@@ -2438,13 +2435,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
update_screen(vc);
}
- if (old_data && (--REFCOUNT(old_data) == 0))
+ if (old_userfont && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
err_out:
p->fontdata = old_data;
- vc->vc_font.data = (void *)old_data;
+ vc->vc_font.data = old_data;
if (userfont) {
p->userfont = old_userfont;
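
For context on the refcount arithmetic in the hunk above: user fonts carry FONT_EXTRA_WORDS of header ints in front of the glyph data, one of which is the reference count, which is why the final kfree() rewinds the pointer. A standalone sketch of that convention, assuming hypothetical helper names (userfont_alloc/userfont_put are not fbcon functions):

#include <stdlib.h>

#define FONT_EXTRA_WORDS 4
/* The refcount is stored in the header word just before the font data. */
#define REFCOUNT(fd) (((int *)(fd))[-1])

static unsigned char *userfont_alloc(size_t size)
{
	size_t words = FONT_EXTRA_WORDS + (size + sizeof(int) - 1) / sizeof(int);
	int *p = calloc(words, sizeof(int));

	if (!p)
		return NULL;
	p += FONT_EXTRA_WORDS;          /* font data starts after the header */
	REFCOUNT(p) = 1;
	return (unsigned char *)p;
}

static void userfont_put(unsigned char *data)
{
	/* Same shape as the hunk: drop the ref, free header and data at 0. */
	if (--REFCOUNT(data) == 0)
		free(data - FONT_EXTRA_WORDS * sizeof(int));
}

Keying the drop on old_userfont rather than on old_data being non-NULL matters because old_data is now captured unconditionally, while only user fonts carry the refcounted header.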
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index c26ee6fd73c9..8fdccf033b2d 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1010,8 +1010,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
- } else {
- goto err1;
}
/*
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index dddd6afcb972..ebc9aeffdde7 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo *var,
DBG("savagefb_check_var");
+ if (!var->pixclock)
+ return -EINVAL;
+
var->transp.offset = 0;
var->transp.length = 0;
switch (var->bits_per_pixel) {
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 803ccb6aa479..009bf1d92644 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1444,6 +1444,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+ if (!var->pixclock)
+ return -EINVAL;
pixclock = var->pixclock;
if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
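
Both fbdev hunks above add the same guard: a mode request with pixclock == 0 must be rejected before check_var arithmetic divides by it. A minimal sketch of the pattern (pixclock_to_khz is a hypothetical helper; the drivers' real mode math differs):

#include <errno.h>
#include <stdint.h>

/*
 * var->pixclock is a pixel period in picoseconds; drivers divide by it
 * to recover the dot clock, so 0 must be rejected up front.
 */
static int pixclock_to_khz(uint32_t pixclock_ps)
{
	if (!pixclock_ps)
		return -EINVAL;                        /* the guard both hunks add */

	return (int)(1000000000U / pixclock_ps);   /* kHz = 1e9 / period_ps */
}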
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 2de0e675fd15..8e5bac27542d 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1158,7 +1158,7 @@ stifb_init_display(struct stifb_info *fb)
}
break;
}
- stifb_blank(0, (struct fb_info *)fb); /* 0=enable screen */
+ stifb_blank(0, fb->info); /* 0=enable screen */
SETUP_FB(fb);
}
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index 42c25dc85197..ac73937073a7 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -374,7 +374,6 @@ static int vt8500lcd_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ defined\n");
ret = -ENODEV;
goto failed_free_palette;
}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b8cfea7812d6..3b9f080109d7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -923,8 +923,8 @@ static void shutdown_pirq(struct irq_data *data)
return;
do_mask(info, EVT_MASK_REASON_EXPLICIT);
- xen_evtchn_close(evtchn);
xen_irq_info_cleanup(info);
+ xen_evtchn_close(evtchn);
}
static void enable_pirq(struct irq_data *data)
@@ -956,6 +956,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
{
evtchn_port_t evtchn;
+ bool close_evtchn = false;
if (!info) {
xen_irq_free_desc(irq);
@@ -975,7 +976,7 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
struct xenbus_device *dev;
if (!info->is_static)
- xen_evtchn_close(evtchn);
+ close_evtchn = true;
switch (info->type) {
case IRQT_VIRQ:
@@ -995,6 +996,9 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
}
xen_irq_info_cleanup(info);
+
+ if (close_evtchn)
+ xen_evtchn_close(evtchn);
}
xen_free_irq(info);
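
Both events_base.c hunks above make the same reordering: the local irq_info bookkeeping is torn down first, and the event channel is closed (returning its number to the pool of reusable channels) only afterwards. A standalone sketch of the pattern under hypothetical names (chan_info, hyp_close, teardown):

#include <stdio.h>

struct chan_info {
	unsigned int evtchn;   /* global channel number, reusable once closed */
	int valid;
};

static void hyp_close(unsigned int evtchn)   /* stand-in for xen_evtchn_close() */
{
	printf("close %u\n", evtchn);
}

static void teardown(struct chan_info *info)
{
	unsigned int evtchn = info->evtchn;

	/* 1. Drop local bookkeeping while the channel is still owned... */
	info->valid = 0;
	info->evtchn = 0;

	/* 2. ...and only then release the number for reuse. */
	hyp_close(evtchn);
}

Closing first, as the old code did, leaves a window in which the freed channel number can be rebound while stale local state still points at it.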
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 26ffb8755ffb..f93f73ecefee 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -317,7 +317,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
rc = -EFAULT;
goto out_free;
}
- if (copy_to_user(arg->gref_ids, gref_ids,
+ if (copy_to_user(arg->gref_ids_flex, gref_ids,
sizeof(gref_ids[0]) * op.count)) {
rc = -EFAULT;
goto out_free;
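
The gref_ids_flex name suggests the UAPI struct gained a flexible-array alias for the legacy one-element gref_ids[] array, so in-kernel copies use an unbounded view rather than tripping fortified bounds checks on the fake 1-element bound. An approximate GNU-C sketch of that shape, mirroring the kernel's __DECLARE_FLEX_ARRAY() trick; the field layout here is illustrative, not copied from the real Xen UAPI header:

#include <stdint.h>

/* GNU C: an empty leading struct makes the flexible array legal in a union. */
struct alloc_gref {
	uint16_t domid;
	uint16_t flags;
	uint32_t count;
	uint64_t index;
	union {
		uint32_t gref_ids[1];              /* legacy fixed-size ABI name */
		struct {
			struct { } __empty;            /* placeholder member */
			uint32_t gref_ids_flex[];      /* unbounded view for copies */
		};
	};
};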
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 508655273145..c63f317e3df3 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -65,7 +65,7 @@ struct pcpu {
uint32_t flags;
};
-static struct bus_type xen_pcpu_subsys = {
+static const struct bus_type xen_pcpu_subsys = {
.name = "xen_cpu",
.dev_name = "xen_cpu",
};
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 35b6e306026a..67dfa4778864 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1223,18 +1223,13 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
mmap_write_unlock(mm);
- size = sizeof(*ports) * kioreq->vcpus;
- ports = kzalloc(size, GFP_KERNEL);
- if (!ports) {
- ret = -ENOMEM;
+ ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
+ kioreq->vcpus, sizeof(*ports));
+ if (IS_ERR(ports)) {
+ ret = PTR_ERR(ports);
goto error_kfree;
}
- if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) {
- ret = -EFAULT;
- goto error_kfree_ports;
- }
-
for (i = 0; i < kioreq->vcpus; i++) {
kioreq->ports[i].vcpu = i;
kioreq->ports[i].port = ports[i];
@@ -1256,7 +1251,7 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
error_unbind:
while (--i >= 0)
unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
-error_kfree_ports:
+
kfree(ports);
error_kfree:
kfree(kioreq);
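
The privcmd hunk above folds an open-coded allocate-then-copy into memdup_array_user(), which also rejects n * size overflow. A user-space approximation of what the helper does (memdup_array and its *err convention are inventions of this sketch; the kernel helper returns ERR_PTR() instead):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/*
 * Approximation of memdup_array_user(): overflow-checked n * size
 * allocation plus a copy, with failures reported through *err.
 */
static void *memdup_array(const void *src, size_t n, size_t size, int *err)
{
	void *p;

	if (size && n > (size_t)-1 / size) {   /* n * size would overflow */
		*err = -EOVERFLOW;
		return NULL;
	}
	p = malloc(n * size);
	if (!p) {
		*err = -ENOMEM;
		return NULL;
	}
	memcpy(p, src, n * size);              /* copy_from_user() in the kernel */
	*err = 0;
	return p;
}

Because the helper owns the error reporting, the caller also loses the separate error_kfree_ports unwind label, as the hunk shows.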
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 8cd583db20b1..b293d7652f15 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -237,7 +237,7 @@ static const struct attribute_group *balloon_groups[] = {
NULL
};
-static struct bus_type balloon_subsys = {
+static const struct bus_type balloon_subsys = {
.name = BALLOON_CLASS_NAME,
.dev_name = BALLOON_CLASS_NAME,
};
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 32835b4b9bc5..51b3124b0d56 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -116,14 +116,15 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
* @dev: xenbus device
* @path: path to watch
* @watch: watch to register
+ * @will_handle: callback to determine whether an event should be queued
* @callback: callback to register
*
* Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback. On success,
- * the given @path will be saved as @watch->node, and remains the
- * caller's to free. On error, @watch->node will
- * be NULL, the device will switch to %XenbusStateClosing, and the error will
- * be saved in the store.
+ * for storage, @will_handle as the callback that determines whether each
+ * event needs to be queued, and the given @callback function as the callback.
+ * On success, the given @path will be saved as @watch->node, and remains the
+ * caller's to free. On error, @watch->node will be NULL, the device will
+ * switch to %XenbusStateClosing, and the error will be saved in the store.
*
* Returns: %0 on success or -errno on error
*/
@@ -158,11 +159,13 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
* xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
* @dev: xenbus device
* @watch: watch to register
+ * @will_handle: callback to determine whether an event should be queued
* @callback: callback to register
* @pathfmt: format of path to watch
*
* Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the
+ * structure for storage, @will_handle as the callback that determines whether
+ * each event needs to be queued, and the given @callback function as the
* callback. On success, the watched path (@path/@path2) will be saved
* as @watch->node, and becomes the caller's to kfree().
* On error, watch->node will be NULL, so the caller has nothing to